diff --git a/README.md b/README.md index 74a1e50cd..14d9686aa 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ to help you get it set up. To get started, you'll need: -* Ruby 2.6.0 +* Ruby 3.1.0 * You can use `rbenv` to manage your Ruby version(s) * [`Bundler`](https://github.com/bundler/bundler) * This can be installed with `gem install bundler` @@ -59,7 +59,7 @@ that directory. To get started right away, use the bootstrap script with: ```bash -./tools/bootstrap +./scripts/bootstrap ``` --- @@ -90,7 +90,7 @@ bundle install Now, you can verify you're ready with: ```bash -./tools/doctor +./scripts/doctor ``` ### Generating the Terraform Providers diff --git a/mmv1/.ruby-version b/mmv1/.ruby-version index e70b4523a..fd2a01863 100644 --- a/mmv1/.ruby-version +++ b/mmv1/.ruby-version @@ -1 +1 @@ -2.6.0 +3.1.0 diff --git a/mmv1/Gemfile.lock b/mmv1/Gemfile.lock index e0cdbde27..f1eedbea8 100644 --- a/mmv1/Gemfile.lock +++ b/mmv1/Gemfile.lock @@ -1,55 +1,78 @@ GEM remote: https://rubygems.org/ specs: - activesupport (5.2.4.3) + activesupport (7.1.2) + base64 + bigdecimal concurrent-ruby (~> 1.0, >= 1.0.2) - i18n (>= 0.7, < 2) - minitest (~> 5.1) - tzinfo (~> 1.1) - addressable (2.5.2) - public_suffix (>= 2.0.2, < 4.0) - ast (2.4.0) - binding_of_caller (0.8.0) + connection_pool (>= 2.2.5) + drb + i18n (>= 1.6, < 2) + minitest (>= 5.1) + mutex_m + tzinfo (~> 2.0) + addressable (2.8.5) + public_suffix (>= 2.0.2, < 6.0) + ast (2.4.2) + base64 (0.2.0) + bigdecimal (3.1.4) + binding_of_caller (1.0.0) debug_inspector (>= 0.0.1) - concurrent-ruby (1.1.6) - debug_inspector (0.0.3) - diff-lcs (1.3) - faraday (0.15.4) - multipart-post (>= 1.2, < 3) - i18n (1.8.2) + concurrent-ruby (1.2.2) + connection_pool (2.4.1) + debug_inspector (1.1.0) + diff-lcs (1.5.0) + drb (2.2.0) + ruby2_keywords + faraday (2.7.12) + base64 + faraday-net_http (>= 2.0, < 3.1) + ruby2_keywords (>= 0.0.4) + faraday-net_http (3.0.2) + i18n (1.14.1) concurrent-ruby (~> 1.0) - jaro_winkler (1.5.4) + json 
(2.7.1) + language_server-protocol (3.17.0.3) metaclass (0.0.4) - minitest (5.14.1) + minitest (5.20.0) mocha (1.3.0) metaclass (~> 0.0.1) - multipart-post (2.0.0) - octokit (4.13.0) - sawyer (~> 0.8.0, >= 0.5.3) - parallel (1.19.1) - parser (2.6.5.0) - ast (~> 2.4.0) - public_suffix (3.0.3) - rainbow (3.0.0) - rake (12.3.3) - rspec (3.8.0) - rspec-core (~> 3.8.0) - rspec-expectations (~> 3.8.0) - rspec-mocks (~> 3.8.0) - rspec-core (3.8.0) - rspec-support (~> 3.8.0) - rspec-expectations (3.8.1) + mutex_m (0.2.0) + octokit (8.0.0) + faraday (>= 1, < 3) + sawyer (~> 0.9) + parallel (1.23.0) + parser (3.2.2.4) + ast (~> 2.4.1) + racc + public_suffix (5.0.4) + racc (1.7.3) + rainbow (3.1.1) + rake (13.1.0) + regexp_parser (2.8.3) + rexml (3.3.9) + rspec (3.12.0) + rspec-core (~> 3.12.0) + rspec-expectations (~> 3.12.0) + rspec-mocks (~> 3.12.0) + rspec-core (3.12.2) + rspec-support (~> 3.12.0) + rspec-expectations (3.12.3) diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.8.0) - rspec-mocks (3.8.0) + rspec-support (~> 3.12.0) + rspec-mocks (3.12.6) diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.8.0) - rspec-support (3.8.0) - rubocop (0.77.0) - jaro_winkler (~> 1.5.1) + rspec-support (~> 3.12.0) + rspec-support (3.12.1) + rubocop (1.58.0) + json (~> 2.3) + language_server-protocol (>= 3.17.0) parallel (~> 1.10) - parser (>= 2.6) + parser (>= 3.2.2.4) rainbow (>= 2.2.2, < 4.0) + regexp_parser (>= 1.8, < 3.0) + rexml (>= 3.2.5, < 4.0) + rubocop-ast (>= 1.30.0, < 2.0) ruby-progressbar (~> 1.7) unicode-display_width (>= 1.4.0, < 1.7) ruby-progressbar (1.10.1) @@ -60,7 +83,6 @@ GEM tzinfo (1.2.11) thread_safe (~> 0.1) unicode-display_width (1.6.0) - PLATFORMS ruby @@ -74,5 +96,4 @@ DEPENDENCIES rubocop (>= 0.77.0) BUNDLED WITH - 1.17.2 - + 2.4.13 diff --git a/mmv1/api/resource.rb b/mmv1/api/resource.rb index 2fbc09182..cd27fcd9d 100644 --- a/mmv1/api/resource.rb +++ b/mmv1/api/resource.rb @@ -174,7 +174,6 @@ def validate set_variables(@parameters, :__resource) 
set_variables(@properties, :__resource) - check :properties, type: Array, item_type: Api::Type, required: true unless @exclude check :parameters, type: Array, item_type: Api::Type unless @exclude diff --git a/mmv1/compiler.rb b/mmv1/compiler.rb index bdb7f6d4d..1c9c71f77 100755 --- a/mmv1/compiler.rb +++ b/mmv1/compiler.rb @@ -23,6 +23,7 @@ # generation. ENV['TZ'] = 'UTC' +require 'active_support' require 'active_support/inflector' require 'active_support/core_ext/array/conversions' require 'api/compiler' diff --git a/mmv1/google/yaml_validator.rb b/mmv1/google/yaml_validator.rb index b50bcfba5..0ea04e327 100644 --- a/mmv1/google/yaml_validator.rb +++ b/mmv1/google/yaml_validator.rb @@ -22,7 +22,7 @@ def parse(content) # TODO(nelsonjr): Allow specifying which symbols to restrict it further. # But it requires inspecting all configuration files for symbol sources, # such as Enum values. Leaving it as a nice-to-have for the future. - YAML.safe_load(content, allowed_classes) + YAML.safe_load(content, permitted_classes: allowed_classes) end def allowed_classes diff --git a/mmv1/products/apigee/api.yaml b/mmv1/products/apigee/api.yaml index 1c0bfcec7..a8b763dd4 100644 --- a/mmv1/products/apigee/api.yaml +++ b/mmv1/products/apigee/api.yaml @@ -498,3 +498,1066 @@ objects: 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.endpointAttachments/create' + + + + - !ruby/object:Api::Resource + name: Organization + base_url: '{{parent}}' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + 
status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + No description + properties: + + - !ruby/object:Api::Type::String + name: 'apiConsumerDataEncryptionKeyName' + description: | + Cloud KMS key name used for encrypting API consumer data. Required for US/EU regions when [BillingType](#BillingType) is `SUBSCRIPTION`. When [BillingType](#BillingType) is `EVALUATION` or the region is not US/EU, a Google-Managed encryption key will be used. Format: `projects/*/locations/*/keyRings/*/cryptoKeys/*` + - !ruby/object:Api::Type::String + name: 'runtimeDatabaseEncryptionKeyName' + description: | + Cloud KMS key name used for encrypting the data that is stored and replicated across runtime instances. Update is not allowed after the organization is created. Required when [RuntimeType](#RuntimeType) is `CLOUD`. If not specified when [RuntimeType](#RuntimeType) is `TRIAL`, a Google-Managed encryption key will be used. For example: "projects/foo/locations/us/keyRings/bar/cryptoKeys/baz". **Note:** Not supported for Apigee hybrid. + - !ruby/object:Api::Type::Array + name: 'environments' + description: | + Output only. List of environments in the Apigee organization. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'runtimeType' + description: | + Required. Runtime type of the Apigee organization based on the Apigee subscription purchased. + values: + - :RUNTIME_TYPE_UNSPECIFIED + - :CLOUD + - :HYBRID + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Not used by Apigee. + values: + - :TYPE_UNSPECIFIED + - :TYPE_TRIAL + - :TYPE_PAID + - :TYPE_INTERNAL + - !ruby/object:Api::Type::Boolean + name: 'portalDisabled' + description: | + Configuration for the Portals settings. 
+ - !ruby/object:Api::Type::String + name: 'authorizedNetwork' + description: | + Compute Engine network used for Service Networking to be peered with Apigee runtime instances. See [Getting started with the Service Networking API](https://cloud.google.com/service-infrastructure/docs/service-networking/getting-started). Valid only when [RuntimeType](#RuntimeType) is set to `CLOUD`. The value must be set before the creation of a runtime instance and can be updated only when there are no runtime instances. For example: `default`. Apigee also supports shared VPC (that is, the host network project is not the same as the one that is peering with Apigee). See [Shared VPC overview](https://cloud.google.com/vpc/docs/shared-vpc). To use a shared VPC network, use the following format: `projects/{host-project-id}/{region}/networks/{network-name}`. For example: `projects/my-sharedvpc-host/global/networks/mynetwork` **Note:** Not supported for Apigee hybrid. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + Output only. Project ID associated with the Apigee organization. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Apigee organization. + - !ruby/object:Api::Type::String + name: 'caCertificate' + description: | + Output only. Base64-encoded public certificate for the root CA of the Apigee organization. Valid only when [RuntimeType](#RuntimeType) is `CLOUD`. + - !ruby/object:Api::Type::Enum + name: 'subscriptionType' + description: | + Output only. DEPRECATED: This will eventually be replaced by BillingType. Subscription type of the Apigee organization. Valid values include trial (free, limited, and for evaluation purposes only) or paid (full subscription has been purchased). See [Apigee pricing](https://cloud.google.com/apigee/pricing/). 
+ values: + - :SUBSCRIPTION_TYPE_UNSPECIFIED + - :PAID + - :TRIAL + - !ruby/object:Api::Type::NestedObject + name: 'addonsConfig' + description: | + Add-on configurations for the Apigee organization. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'integrationConfig' + description: | + Configuration for the Integration add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Integration add-on is enabled. + - !ruby/object:Api::Type::NestedObject + name: 'apiSecurityConfig' + description: | + Configurations of the API Security add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the API security add-on is enabled. + - !ruby/object:Api::Type::String + name: 'expiresAt' + description: | + Output only. Time at which the API Security add-on expires in in milliseconds since epoch. If unspecified, the add-on will never expire. + - !ruby/object:Api::Type::NestedObject + name: 'monetizationConfig' + description: | + Configuration for the Monetization add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Monetization add-on is enabled. + - !ruby/object:Api::Type::NestedObject + name: 'connectorsPlatformConfig' + description: | + Configuration for the Connectors Platform add-on. + properties: + - !ruby/object:Api::Type::String + name: 'expiresAt' + description: | + Output only. Time at which the Connectors Platform add-on expires in milliseconds since epoch. If unspecified, the add-on will never expire. + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Connectors Platform add-on is enabled. + - !ruby/object:Api::Type::NestedObject + name: 'analyticsConfig' + description: | + Configuration for the Analytics add-on. 
+ properties: + - !ruby/object:Api::Type::String + name: 'expireTimeMillis' + description: | + Output only. Time at which the Analytics add-on expires in milliseconds since epoch. If unspecified, the add-on will never expire. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the Analytics add-on. + values: + - :ADDON_STATE_UNSPECIFIED + - :ENABLING + - :ENABLED + - :DISABLING + - :DISABLED + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Whether the Analytics add-on is enabled. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The latest update time. + - !ruby/object:Api::Type::NestedObject + name: 'advancedApiOpsConfig' + description: | + Configuration for the Advanced API Ops add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Advanced API Ops add-on is enabled. + - !ruby/object:Api::Type::String + name: 'customerName' + description: | + Not used by Apigee. + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. Time that the Apigee organization was created in milliseconds since epoch. + - !ruby/object:Api::Type::String + name: 'lastModifiedAt' + description: | + Output only. Time that the Apigee organization was last modified in milliseconds since epoch. + - !ruby/object:Api::Type::Enum + name: 'subscriptionPlan' + description: | + Output only. Subscription plan that the customer has purchased. Output only. + values: + - :SUBSCRIPTION_PLAN_UNSPECIFIED + - :SUBSCRIPTION_2021 + - :SUBSCRIPTION_2024 + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Message for compatibility with legacy Edge specification for Java Properties object in JSON. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'property' + description: | + List of all properties in the object + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + The property value + - !ruby/object:Api::Type::String + name: 'name' + description: | + The property key + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. State of the organization. Values other than ACTIVE means the resource is not ready to use. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :DELETING + - :UPDATING + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the Apigee organization. + - !ruby/object:Api::Type::Boolean + name: 'disableVpcPeering' + description: | + Optional. Flag that specifies whether the VPC Peering through Private Google Access should be disabled between the consumer network and Apigee. Valid only when RuntimeType is set to CLOUD. Required if an authorizedNetwork on the consumer project is not provided, in which case the flag should be set to true. The value must be set before the creation of any Apigee runtime instance and can be updated only when there are no runtime instances. **Note:** Apigee will be deprecating the vpc peering model that requires you to provide 'authorizedNetwork', by making the non-peering model as the default way of provisioning Apigee organization in future. So, this will be a temporary flag to enable the transition. Not supported for Apigee hybrid. + - !ruby/object:Api::Type::String + name: 'controlPlaneEncryptionKeyName' + description: | + Cloud KMS key name used for encrypting control plane data that is stored in a multi region. Required when [BillingType](#BillingType) is `SUBSCRIPTION`. When [BillingType](#BillingType) is `EVALUATION`, a Google-Managed encryption key will be used. 
Format: `projects/*/locations/*/keyRings/*/cryptoKeys/*` + - !ruby/object:Api::Type::String + name: 'analyticsRegion' + description: | + Required. DEPRECATED: This field will eventually be deprecated and replaced with a differently-named field. Primary Google Cloud region for analytics data storage. For valid values, see [Create an Apigee organization](https://cloud.google.com/apigee/docs/api-platform/get-started/create-org). + - !ruby/object:Api::Type::String + name: 'apiConsumerDataLocation' + description: | + This field is needed only for customers with control plane in US or EU. Apigee stores some control plane data only in single region. This field determines which single region Apigee should use. For example: "us-west1" when control plane is in US or "europe-west2" when control plane is in EU. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Display name for the Apigee organization. Unused, but reserved for future use. + - !ruby/object:Api::Type::String + name: 'apigeeProjectId' + description: | + Output only. Apigee Project ID associated with the organization. Use this project to allowlist Apigee in the Service Attachment when using private service connect with Apigee. + - !ruby/object:Api::Type::String + name: 'expiresAt' + description: | + Output only. Time that the Apigee organization is scheduled for deletion. + - !ruby/object:Api::Type::Array + name: 'attributes' + description: | + Not used by Apigee. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'billingType' + description: | + Billing type of the Apigee organization. See [Apigee pricing](https://cloud.google.com/apigee/pricing). 
+ values: + - :BILLING_TYPE_UNSPECIFIED + - :SUBSCRIPTION + - :EVALUATION + - :PAYG + + + + + - !ruby/object:Api::Resource + name: Organization + base_url: '{{parent}}' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + No description + properties: + + - !ruby/object:Api::Type::String + name: 'apiConsumerDataEncryptionKeyName' + description: | + Cloud KMS key name used for encrypting API consumer data. Required for US/EU regions when [BillingType](#BillingType) is `SUBSCRIPTION`. When [BillingType](#BillingType) is `EVALUATION` or the region is not US/EU, a Google-Managed encryption key will be used. Format: `projects/*/locations/*/keyRings/*/cryptoKeys/*` + - !ruby/object:Api::Type::String + name: 'runtimeDatabaseEncryptionKeyName' + description: | + Cloud KMS key name used for encrypting the data that is stored and replicated across runtime instances. Update is not allowed after the organization is created. Required when [RuntimeType](#RuntimeType) is `CLOUD`. If not specified when [RuntimeType](#RuntimeType) is `TRIAL`, a Google-Managed encryption key will be used. For example: "projects/foo/locations/us/keyRings/bar/cryptoKeys/baz". **Note:** Not supported for Apigee hybrid. + - !ruby/object:Api::Type::Array + name: 'environments' + description: | + Output only. List of environments in the Apigee organization. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'runtimeType' + description: | + Required. Runtime type of the Apigee organization based on the Apigee subscription purchased. + values: + - :RUNTIME_TYPE_UNSPECIFIED + - :CLOUD + - :HYBRID + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Not used by Apigee. + values: + - :TYPE_UNSPECIFIED + - :TYPE_TRIAL + - :TYPE_PAID + - :TYPE_INTERNAL + - !ruby/object:Api::Type::Boolean + name: 'portalDisabled' + description: | + Configuration for the Portals settings. + - !ruby/object:Api::Type::String + name: 'authorizedNetwork' + description: | + Compute Engine network used for Service Networking to be peered with Apigee runtime instances. See [Getting started with the Service Networking API](https://cloud.google.com/service-infrastructure/docs/service-networking/getting-started). Valid only when [RuntimeType](#RuntimeType) is set to `CLOUD`. The value must be set before the creation of a runtime instance and can be updated only when there are no runtime instances. For example: `default`. Apigee also supports shared VPC (that is, the host network project is not the same as the one that is peering with Apigee). See [Shared VPC overview](https://cloud.google.com/vpc/docs/shared-vpc). To use a shared VPC network, use the following format: `projects/{host-project-id}/{region}/networks/{network-name}`. For example: `projects/my-sharedvpc-host/global/networks/mynetwork` **Note:** Not supported for Apigee hybrid. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + Output only. Project ID associated with the Apigee organization. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Apigee organization. + - !ruby/object:Api::Type::String + name: 'caCertificate' + description: | + Output only. Base64-encoded public certificate for the root CA of the Apigee organization. Valid only when [RuntimeType](#RuntimeType) is `CLOUD`. 
+ - !ruby/object:Api::Type::Enum + name: 'subscriptionType' + description: | + Output only. DEPRECATED: This will eventually be replaced by BillingType. Subscription type of the Apigee organization. Valid values include trial (free, limited, and for evaluation purposes only) or paid (full subscription has been purchased). See [Apigee pricing](https://cloud.google.com/apigee/pricing/). + values: + - :SUBSCRIPTION_TYPE_UNSPECIFIED + - :PAID + - :TRIAL + - !ruby/object:Api::Type::NestedObject + name: 'addonsConfig' + description: | + Add-on configurations for the Apigee organization. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'integrationConfig' + description: | + Configuration for the Integration add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Integration add-on is enabled. + - !ruby/object:Api::Type::NestedObject + name: 'apiSecurityConfig' + description: | + Configurations of the API Security add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the API security add-on is enabled. + - !ruby/object:Api::Type::String + name: 'expiresAt' + description: | + Output only. Time at which the API Security add-on expires in in milliseconds since epoch. If unspecified, the add-on will never expire. + - !ruby/object:Api::Type::NestedObject + name: 'monetizationConfig' + description: | + Configuration for the Monetization add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Monetization add-on is enabled. + - !ruby/object:Api::Type::NestedObject + name: 'connectorsPlatformConfig' + description: | + Configuration for the Connectors Platform add-on. + properties: + - !ruby/object:Api::Type::String + name: 'expiresAt' + description: | + Output only. Time at which the Connectors Platform add-on expires in milliseconds since epoch. 
If unspecified, the add-on will never expire. + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Connectors Platform add-on is enabled. + - !ruby/object:Api::Type::NestedObject + name: 'analyticsConfig' + description: | + Configuration for the Analytics add-on. + properties: + - !ruby/object:Api::Type::String + name: 'expireTimeMillis' + description: | + Output only. Time at which the Analytics add-on expires in milliseconds since epoch. If unspecified, the add-on will never expire. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the Analytics add-on. + values: + - :ADDON_STATE_UNSPECIFIED + - :ENABLING + - :ENABLED + - :DISABLING + - :DISABLED + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Whether the Analytics add-on is enabled. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The latest update time. + - !ruby/object:Api::Type::NestedObject + name: 'advancedApiOpsConfig' + description: | + Configuration for the Advanced API Ops add-on. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Flag that specifies whether the Advanced API Ops add-on is enabled. + - !ruby/object:Api::Type::String + name: 'customerName' + description: | + Not used by Apigee. + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. Time that the Apigee organization was created in milliseconds since epoch. + - !ruby/object:Api::Type::String + name: 'lastModifiedAt' + description: | + Output only. Time that the Apigee organization was last modified in milliseconds since epoch. + - !ruby/object:Api::Type::Enum + name: 'subscriptionPlan' + description: | + Output only. Subscription plan that the customer has purchased. Output only. 
+ values: + - :SUBSCRIPTION_PLAN_UNSPECIFIED + - :SUBSCRIPTION_2021 + - :SUBSCRIPTION_2024 + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Message for compatibility with legacy Edge specification for Java Properties object in JSON. + properties: + - !ruby/object:Api::Type::Array + name: 'property' + description: | + List of all properties in the object + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + The property value + - !ruby/object:Api::Type::String + name: 'name' + description: | + The property key + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. State of the organization. Values other than ACTIVE means the resource is not ready to use. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :DELETING + - :UPDATING + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the Apigee organization. + - !ruby/object:Api::Type::Boolean + name: 'disableVpcPeering' + description: | + Optional. Flag that specifies whether the VPC Peering through Private Google Access should be disabled between the consumer network and Apigee. Valid only when RuntimeType is set to CLOUD. Required if an authorizedNetwork on the consumer project is not provided, in which case the flag should be set to true. The value must be set before the creation of any Apigee runtime instance and can be updated only when there are no runtime instances. **Note:** Apigee will be deprecating the vpc peering model that requires you to provide 'authorizedNetwork', by making the non-peering model as the default way of provisioning Apigee organization in future. So, this will be a temporary flag to enable the transition. Not supported for Apigee hybrid. 
+ - !ruby/object:Api::Type::String + name: 'controlPlaneEncryptionKeyName' + description: | + Cloud KMS key name used for encrypting control plane data that is stored in a multi region. Required when [BillingType](#BillingType) is `SUBSCRIPTION`. When [BillingType](#BillingType) is `EVALUATION`, a Google-Managed encryption key will be used. Format: `projects/*/locations/*/keyRings/*/cryptoKeys/*` + - !ruby/object:Api::Type::String + name: 'analyticsRegion' + description: | + Required. DEPRECATED: This field will eventually be deprecated and replaced with a differently-named field. Primary Google Cloud region for analytics data storage. For valid values, see [Create an Apigee organization](https://cloud.google.com/apigee/docs/api-platform/get-started/create-org). + - !ruby/object:Api::Type::String + name: 'apiConsumerDataLocation' + description: | + This field is needed only for customers with control plane in US or EU. Apigee stores some control plane data only in single region. This field determines which single region Apigee should use. For example: "us-west1" when control plane is in US or "europe-west2" when control plane is in EU. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Display name for the Apigee organization. Unused, but reserved for future use. + - !ruby/object:Api::Type::String + name: 'apigeeProjectId' + description: | + Output only. Apigee Project ID associated with the organization. Use this project to allowlist Apigee in the Service Attachment when using private service connect with Apigee. + - !ruby/object:Api::Type::String + name: 'expiresAt' + description: | + Output only. Time that the Apigee organization is scheduled for deletion. + - !ruby/object:Api::Type::Array + name: 'attributes' + description: | + Not used by Apigee. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'billingType' + description: | + Billing type of the Apigee organization. 
See [Apigee pricing](https://cloud.google.com/apigee/pricing). + values: + - :BILLING_TYPE_UNSPECIFIED + - :SUBSCRIPTION + - :EVALUATION + - :PAYG + + + + + - !ruby/object:Api::Resource + name: OrganizationEnvgroup + base_url: '{{parent}}/envgroups' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + EnvironmentGroup configuration. An environment group is used to group one or more Apigee environments under a single host name. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + ID of the environment group. + - !ruby/object:Api::Type::String + name: 'lastModifiedAt' + description: | + Output only. The time at which the environment group was last updated as milliseconds since epoch. + - !ruby/object:Api::Type::Array + name: 'hostnames' + description: | + Required. Host names for this environment group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. State of the environment group. Values other than ACTIVE means the resource is not ready to use. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :DELETING + - :UPDATING + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. The time at which the environment group was created as milliseconds since epoch. 
+ + + + + - !ruby/object:Api::Resource + name: OrganizationEnvgroup + base_url: '{{parent}}/envgroups' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + EnvironmentGroup configuration. An environment group is used to group one or more Apigee environments under a single host name. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + ID of the environment group. + - !ruby/object:Api::Type::String + name: 'lastModifiedAt' + description: | + Output only. The time at which the environment group was last updated as milliseconds since epoch. + - !ruby/object:Api::Type::Array + name: 'hostnames' + description: | + Required. Host names for this environment group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. State of the environment group. Values other than ACTIVE means the resource is not ready to use. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :DELETING + - :UPDATING + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. The time at which the environment group was created as milliseconds since epoch. 
+ + + + + - !ruby/object:Api::Resource + name: OrganizationApiRevision + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged. + properties: + + - !ruby/object:Api::Type::Array + name: 'extensions' + description: | + Application specific response metadata. 
Must be set in the first response for streaming APIs. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'contentType' + description: | + The HTTP Content-Type header value specifying the content type of the body. + - !ruby/object:Api::Type::String + name: 'data' + description: | + The HTTP request/response body as raw binary. + + + + + - !ruby/object:Api::Resource + name: OrganizationApi + base_url: '{{parent}}/apis' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Metadata describing the API proxy + properties: + + - !ruby/object:Api::Type::Array + name: 'revision' + description: | + Output only. List of revisions defined for the API proxy. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'latestRevisionId' + description: | + Output only. The id of the most recently created revision for this api proxy. + - !ruby/object:Api::Type::NestedObject + name: 'metaData' + description: | + Metadata common to many entities in this API. + properties: + - !ruby/object:Api::Type::String + name: 'lastModifiedAt' + description: | + Time at which the API proxy was most recently modified, in milliseconds since epoch. + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Time at which the API proxy was created, in milliseconds since epoch. 
+ - !ruby/object:Api::Type::String + name: 'subType' + description: | + The type of entity described + - !ruby/object:Api::Type::Enum + name: 'apiProxyType' + description: | + Output only. The type of the API proxy. + values: + - :API_PROXY_TYPE_UNSPECIFIED + - :PROGRAMMABLE + - :CONFIGURABLE + - !ruby/object:Api::Type::Boolean + name: 'readOnly' + description: | + Output only. Whether this proxy is read-only. A read-only proxy cannot have new revisions created through calls to CreateApiProxyRevision. A proxy is read-only if it was generated by an archive. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User labels applied to this API Proxy. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the API proxy. + + + + + - !ruby/object:Api::Resource + name: OrganizationApi + base_url: '{{parent}}/apis' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Metadata describing the API proxy + properties: + + - !ruby/object:Api::Type::Array + name: 'revision' + description: | + Output only. List of revisions defined for the API proxy. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'latestRevisionId' + description: | + Output only. The id of the most recently created revision for this api proxy. 
+ - !ruby/object:Api::Type::NestedObject + name: 'metaData' + description: | + Metadata common to many entities in this API. + properties: + - !ruby/object:Api::Type::String + name: 'lastModifiedAt' + description: | + Time at which the API proxy was most recently modified, in milliseconds since epoch. + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Time at which the API proxy was created, in milliseconds since epoch. + - !ruby/object:Api::Type::String + name: 'subType' + description: | + The type of entity described + - !ruby/object:Api::Type::Enum + name: 'apiProxyType' + description: | + Output only. The type of the API proxy. + values: + - :API_PROXY_TYPE_UNSPECIFIED + - :PROGRAMMABLE + - :CONFIGURABLE + - !ruby/object:Api::Type::Boolean + name: 'readOnly' + description: | + Output only. Whether this proxy is read-only. A read-only proxy cannot have new revisions created through calls to CreateApiProxyRevision. A proxy is read-only if it was generated by an archive. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User labels applied to this API Proxy. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the API proxy. 
+ + + + + - !ruby/object:Api::Resource + name: OrganizationEnvgroupAttachment + base_url: '{{parent}}/attachments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + EnvironmentGroupAttachment is a resource which defines an attachment of an environment to an environment group. + properties: + + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. The time at which the environment group attachment was created as milliseconds since epoch. + - !ruby/object:Api::Type::String + name: 'environment' + description: | + Required. ID of the attached environment. + - !ruby/object:Api::Type::String + name: 'environmentGroupId' + description: | + Output only. ID of the environment group. + - !ruby/object:Api::Type::String + name: 'name' + description: | + ID of the environment group attachment. 
+ + + + + - !ruby/object:Api::Resource + name: OrganizationEnvgroupAttachment + base_url: '{{parent}}/attachments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + EnvironmentGroupAttachment is a resource which defines an attachment of an environment to an environment group. + properties: + + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. The time at which the environment group attachment was created as milliseconds since epoch. + - !ruby/object:Api::Type::String + name: 'environment' + description: | + Required. ID of the attached environment. + - !ruby/object:Api::Type::String + name: 'environmentGroupId' + description: | + Output only. ID of the environment group. + - !ruby/object:Api::Type::String + name: 'name' + description: | + ID of the environment group attachment. 
+ + + + + - !ruby/object:Api::Resource + name: OrganizationInstanceAttachment + base_url: '{{+parent}}/attachments' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + InstanceAttachment represents the installation of an environment onto an instance. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. ID of the attachment. + - !ruby/object:Api::Type::String + name: 'environment' + description: | + ID of the attached environment. + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. Time the attachment was created in milliseconds since epoch. + + + + + - !ruby/object:Api::Resource + name: OrganizationInstanceAttachment + base_url: '{{+parent}}/attachments' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/apigee/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + InstanceAttachment represents the installation of an environment onto an instance. 
+ properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. ID of the attachment. + - !ruby/object:Api::Type::String + name: 'environment' + description: | + ID of the attached environment. + - !ruby/object:Api::Type::String + name: 'createdAt' + description: | + Output only. Time the attachment was created in milliseconds since epoch. + diff --git a/mmv1/products/apigee/inspec.yaml b/mmv1/products/apigee/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/apigee/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/artifactregistry/api.yaml b/mmv1/products/artifactregistry/api.yaml index 65e7ecd4a..e61b9beca 100644 --- a/mmv1/products/artifactregistry/api.yaml +++ b/mmv1/products/artifactregistry/api.yaml @@ -12,14 +12,14 @@ # limitations under the License. 
--- !ruby/object:Api::Product -name: ArtifactRegistry +name: Artifactregistry display_name: Artifact Registry scopes: - https://www.googleapis.com/auth/cloud-platform versions: - !ruby/object:Api::Product::Version - name: beta - base_url: https://artifactregistry.googleapis.com/v1beta2/ + name: ga + base_url: https://artifactregistry.googleapis.com/v1beta1/ apis_required: - !ruby/object:Api::Product::ApiReference name: Artifact Registry API @@ -56,7 +56,7 @@ objects: guides: 'Official Documentation': 'https://cloud.google.com/artifact-registry/docs/overview' - api: 'https://cloud.google.com/artifact-registry/docs/reference/rest/v1beta2/projects.locations.repositories' + api: 'https://cloud.google.com/artifact-registry/docs/reference/rest/v1beta1/projects.locations.repositories' iam_policy: !ruby/object:Api::Resource::IamPolicy exclude: false method_name_separator: ':' @@ -155,3 +155,283 @@ objects: - :RELEASE - :SNAPSHOT default_value: :VERSION_POLICY_UNSPECIFIED + + + + - !ruby/object:Api::Resource + name: ProjectLocationRepository + base_url: 'v1beta1/{{parent}}/repositories' + self_link: 'v1beta1/{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/artifactregistry/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A Repository for storing artifacts with a specific format. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'mavenConfig' + description: | + MavenRepositoryConfig is maven related repository details. 
Provides additional configuration details for repositories of the maven format type. + properties: + - !ruby/object:Api::Type::Boolean + name: 'allowSnapshotOverwrites' + description: | + The repository with this flag will allow publishing the same snapshot versions. + - !ruby/object:Api::Type::Enum + name: 'versionPolicy' + description: | + Version policy defines the versions that the registry will accept. + values: + - :VERSION_POLICY_UNSPECIFIED + - :RELEASE + - :SNAPSHOT + - !ruby/object:Api::Type::NestedObject + name: 'dockerConfig' + description: | + DockerRepositoryConfig is docker related repository details. Provides additional configuration details for repositories of the docker format type. + properties: + - !ruby/object:Api::Type::Boolean + name: 'immutableTags' + description: | + The repository which enabled this flag prevents all tags from being modified, moved or deleted. This does not prevent tags from being created. + - !ruby/object:Api::Type::NestedObject + name: 'virtualRepositoryConfig' + description: | + Virtual repository configuration. + properties: + - !ruby/object:Api::Type::Array + name: 'upstreamPolicies' + description: | + Policies that configure the upstream artifacts distributed by the Virtual Repository. Upstream policies cannot be set on a standard repository. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'id' + description: | + The user-provided ID of the upstream policy. + - !ruby/object:Api::Type::String + name: 'repository' + description: | + A reference to the repository resource, for example: `projects/p1/locations/us-central1/repositories/repo1`. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + Entries with a greater priority value take precedence in the pull order. + - !ruby/object:Api::Type::NestedObject + name: 'remoteRepositoryConfig' + description: | + Remote repository configuration. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'dockerRepository' + description: | + Configuration for a Docker remote repository. + properties: + - !ruby/object:Api::Type::Enum + name: 'publicRepository' + description: | + One of the publicly available Docker repositories supported by Artifact Registry. + values: + - :PUBLIC_REPOSITORY_UNSPECIFIED + - :DOCKER_HUB + - !ruby/object:Api::Type::NestedObject + name: 'mavenRepository' + description: | + Configuration for a Maven remote repository. + properties: + - !ruby/object:Api::Type::Enum + name: 'publicRepository' + description: | + One of the publicly available Maven repositories supported by Artifact Registry. + values: + - :PUBLIC_REPOSITORY_UNSPECIFIED + - :MAVEN_CENTRAL + - !ruby/object:Api::Type::NestedObject + name: 'npmRepository' + description: | + Configuration for a Npm remote repository. + properties: + - !ruby/object:Api::Type::Enum + name: 'publicRepository' + description: | + One of the publicly available Npm repositories supported by Artifact Registry. + values: + - :PUBLIC_REPOSITORY_UNSPECIFIED + - :NPMJS + - !ruby/object:Api::Type::NestedObject + name: 'pythonRepository' + description: | + Configuration for a Python remote repository. + properties: + - !ruby/object:Api::Type::Enum + name: 'publicRepository' + description: | + One of the publicly available Python repositories supported by Artifact Registry. + values: + - :PUBLIC_REPOSITORY_UNSPECIFIED + - :PYPI + - !ruby/object:Api::Type::NestedObject + name: 'aptRepository' + description: | + Configuration for an Apt remote repository. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'publicRepository' + description: | + Publicly available Apt repositories constructed from a common repository base and a custom repository path. + properties: + - !ruby/object:Api::Type::Enum + name: 'repositoryBase' + description: | + A common public repository base for Apt. 
+ values: + - :REPOSITORY_BASE_UNSPECIFIED + - :DEBIAN + - :UBUNTU + - :DEBIAN_SNAPSHOT + - !ruby/object:Api::Type::String + name: 'repositoryPath' + description: | + A custom field to define a path to a specific repository from the base. + - !ruby/object:Api::Type::NestedObject + name: 'yumRepository' + description: | + Configuration for a Yum remote repository. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'publicRepository' + description: | + Publicly available Yum repositories constructed from a common repository base and a custom repository path. + properties: + - !ruby/object:Api::Type::Enum + name: 'repositoryBase' + description: | + A common public repository base for Yum. + values: + - :REPOSITORY_BASE_UNSPECIFIED + - :CENTOS + - :CENTOS_DEBUG + - :CENTOS_VAULT + - :CENTOS_STREAM + - :ROCKY + - :EPEL + - !ruby/object:Api::Type::String + name: 'repositoryPath' + description: | + A custom field to define a path to a specific repository from the base. + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the remote source. + - !ruby/object:Api::Type::NestedObject + name: 'upstreamCredentials' + description: | + The credentials to access the remote repository. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'usernamePasswordCredentials' + description: | + Username and password credentials. + properties: + - !ruby/object:Api::Type::String + name: 'username' + description: | + The username to access the remote repository. + - !ruby/object:Api::Type::String + name: 'passwordSecretVersion' + description: | + The Secret Manager key version that holds the password to access the remote repository. Must be in the format of `projects/{project}/secrets/{secret}/versions/{version}`. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. 
+ - !ruby/object:Api::Type::Enum + name: 'format' + description: | + Optional. The format of packages that are stored in the repository. + values: + - :FORMAT_UNSPECIFIED + - :DOCKER + - :MAVEN + - :NPM + - :APT + - :YUM + - :GOOGET + - :PYTHON + - :KFP + - :GO + - !ruby/object:Api::Type::String + name: 'description' + description: | + The user-provided description of the repository. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels with user-defined metadata. This field may contain up to 64 entries. Label keys and values may be no longer than 63 characters. Label keys must begin with a lowercase letter and may only contain lowercase letters, numeric characters, underscores, and dashes. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the repository was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time when the repository was last updated. + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The Cloud KMS resource name of the customer managed encryption key that's used to encrypt the contents of the Repository. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. This value may not be changed after the Repository has been created. + - !ruby/object:Api::Type::Enum + name: 'mode' + description: | + Optional. The mode of the repository. + values: + - :MODE_UNSPECIFIED + - :STANDARD_REPOSITORY + - :VIRTUAL_REPOSITORY + - :REMOTE_REPOSITORY + - !ruby/object:Api::Type::NestedObject + name: 'cleanupPolicies' + description: | + Optional. Cleanup policies for this repository. Cleanup policies indicate when certain package versions can be automatically deleted. Map keys are policy IDs supplied by users during policy creation. 
They must unique within a repository and be under 128 characters in length. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Artifact policy configuration for repository cleanup policies. + - !ruby/object:Api::Type::String + name: 'sizeBytes' + description: | + Output only. The size, in bytes, of all artifact storage in this repository. Repositories that are generally available or in public preview use this to calculate storage costs. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. If set, the repository satisfies physical zone separation. + - !ruby/object:Api::Type::Boolean + name: 'cleanupPolicyDryRun' + description: | + Optional. If true, the cleanup pipeline is prevented from deleting versions in this repository. + diff --git a/mmv1/products/artifactregistry/inspec.yaml b/mmv1/products/artifactregistry/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/artifactregistry/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/bigtableadmin/api.yaml b/mmv1/products/bigtableadmin/api.yaml new file mode 100644 index 000000000..8a90ac23e --- /dev/null +++ b/mmv1/products/bigtableadmin/api.yaml @@ -0,0 +1,825 @@ +--- !ruby/object:Api::Product +name: bigtableadmin +display_name: bigtableadmin +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://bigtableadmin.googleapis.com//v2/ +scopes: + - https://bigtableadmin.googleapis.com//auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://bigtableadmin.googleapis.com/ + url: https://console.cloud.google.com/apis/library/bigtableadmin.googleapis.com/ +objects: + + - !ruby/object:Api::Resource + name: ProjectInstanceCluster + base_url: '{{+parent}}/clusters' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/bigtableadmin/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A resizable group of nodes in a particular cloud location, capable of serving all Tables in the parent Instance. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The unique name of the cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Immutable. The location where this cluster's nodes and storage reside. 
For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form `projects/{project}/locations/{zone}`. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the cluster. + values: + - :STATE_NOT_KNOWN + - :READY + - :CREATING + - :RESIZING + - :DISABLED + - !ruby/object:Api::Type::Integer + name: 'serveNodes' + description: | + The number of nodes in the cluster. If no value is set, Cloud Bigtable automatically allocates nodes based on your data footprint and optimized for 50% storage utilization. + - !ruby/object:Api::Type::NestedObject + name: 'clusterConfig' + description: | + Configuration for a cluster. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'clusterAutoscalingConfig' + description: | + Autoscaling config for a cluster. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingLimits' + description: | + Limits for the number of nodes a Cluster can autoscale up/down to. + properties: + - !ruby/object:Api::Type::Integer + name: 'minServeNodes' + description: | + Required. Minimum number of nodes to scale down to. + - !ruby/object:Api::Type::Integer + name: 'maxServeNodes' + description: | + Required. Maximum number of nodes to scale up to. + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingTargets' + description: | + The Autoscaling targets for a Cluster. These determine the recommended nodes. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpuUtilizationPercent' + description: | + The cpu utilization that the Autoscaler should be trying to achieve. This number is on a scale from 0 (no utilization) to 100 (total utilization), and is limited between 10 and 80, otherwise it will return INVALID_ARGUMENT error. 
+ - !ruby/object:Api::Type::Integer + name: 'storageUtilizationGibPerNode' + description: | + The storage utilization that the Autoscaler should be trying to achieve. This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and 16384 (16TiB) for an HDD cluster, otherwise it will return INVALID_ARGUMENT error. If this value is set to 0, it will be treated as if it were set to the default value: 2560 for SSD, 8192 for HDD. + - !ruby/object:Api::Type::Enum + name: 'defaultStorageType' + description: | + Immutable. The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden. + values: + - :STORAGE_TYPE_UNSPECIFIED + - :SSD + - :HDD + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Cloud Key Management Service (Cloud KMS) settings for a CMEK-protected cluster. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Describes the Cloud KMS encryption key that will be used to protect the destination Bigtable cluster. The requirements for this key are: 1) The Cloud Bigtable service account associated with the project that contains this cluster must be granted the `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. 2) Only regional keys can be used and the region of the CMEK key must match the region of the cluster. 
Values are of the form `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}` + + - !ruby/object:Api::Resource + name: ProjectInstanceAppProfile + base_url: '{{+parent}}/appProfiles' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/bigtableadmin/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A configuration object describing how Cloud Bigtable should treat traffic from a particular end user application. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The unique name of the app profile. Values are of the form `projects/{project}/instances/{instance}/appProfiles/_a-zA-Z0-9*`. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Strongly validated etag for optimistic concurrency control. Preserve the value returned from `GetAppProfile` when calling `UpdateAppProfile` to fail the request if there has been a modification in the mean time. The `update_mask` of the request need not include `etag` for this protection to apply. See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more details. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Long form description of the use case for this AppProfile. 
+ - !ruby/object:Api::Type::NestedObject + name: 'multiClusterRoutingUseAny' + description: | + Read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available in the event of transient errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes consistency to improve availability. + properties: + - !ruby/object:Api::Type::Array + name: 'clusterIds' + description: | + The set of clusters to route to. The order is ignored; clusters will be tried in order of distance. If left empty, all clusters are eligible. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'singleClusterRouting' + description: | + Unconditionally routes all read/write requests to a specific cluster. This option preserves read-your-writes consistency but does not improve availability. + properties: + - !ruby/object:Api::Type::String + name: 'clusterId' + description: | + The cluster to which read/write requests should be routed. + - !ruby/object:Api::Type::Boolean + name: 'allowTransactionalWrites' + description: | + Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are allowed by this app profile. It is unsafe to send these requests to the same table/row/column in multiple clusters. + - !ruby/object:Api::Type::Enum + name: 'priority' + description: | + This field has been deprecated in favor of `standard_isolation.priority`. If you set this field, `standard_isolation.priority` will be set instead. The priority of requests sent using this app profile. + values: + - :PRIORITY_UNSPECIFIED + - :PRIORITY_LOW + - :PRIORITY_MEDIUM + - :PRIORITY_HIGH + - !ruby/object:Api::Type::NestedObject + name: 'standardIsolation' + description: | + Standard options for isolating this app profile's traffic from other use cases. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'priority' + description: | + The priority of requests sent using this app profile. + values: + - :PRIORITY_UNSPECIFIED + - :PRIORITY_LOW + - :PRIORITY_MEDIUM + - :PRIORITY_HIGH + - !ruby/object:Api::Type::NestedObject + name: 'dataBoostIsolationReadOnly' + description: | + Data Boost is a serverless compute capability that lets you run high-throughput read jobs and queries on your Bigtable data, without impacting the performance of the clusters that handle your application traffic. Data Boost supports read-only use cases with single-cluster routing. + properties: + - !ruby/object:Api::Type::Enum + name: 'computeBillingOwner' + description: | + The Compute Billing Owner for this Data Boost App Profile. + values: + - :COMPUTE_BILLING_OWNER_UNSPECIFIED + - :HOST_PAYS + - !ruby/object:Api::Type::String + name: 'sourceTable' + description: | + Required. Immutable. Name of the table from which this backup was created. This needs to be in the same instance as the backup. Values are of the form `projects/{project}/instances/{instance}/tables/{source_table}`. + - !ruby/object:Api::Type::String + name: 'sourceBackup' + description: | + Output only. Name of the backup from which this backup was copied. If a backup is not created by copying a backup, this field will be empty. Values are of the form: projects//instances//clusters//backups/ + - !ruby/object:Api::Type::String + name: 'expireTime' + description: | + Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 90 days from the time the request is received. Once the `expire_time` has passed, Cloud Bigtable will delete the backup and free the resources used by the backup. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. `start_time` is the time that the backup was started (i.e. approximately the time the CreateBackup request is received). 
The row data in this backup will be no older than this timestamp. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. `end_time` is the time that the backup was finished. The row data in the backup will be no newer than this timestamp. + - !ruby/object:Api::Type::String + name: 'sizeBytes' + description: | + Output only. Size of the backup in bytes. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the backup. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :READY + - !ruby/object:Api::Type::NestedObject + name: 'encryptionInfo' + description: | + Encryption information for a given resource. If this resource is protected with customer managed encryption, the in-use Cloud Key Management Service (Cloud KMS) key version is specified along with its status. + properties: + - !ruby/object:Api::Type::Enum + name: 'encryptionType' + description: | + Output only. The type of encryption used to protect this resource. + values: + - :ENCRYPTION_TYPE_UNSPECIFIED + - :GOOGLE_DEFAULT_ENCRYPTION + - :CUSTOMER_MANAGED_ENCRYPTION + - !ruby/object:Api::Type::NestedObject + name: 'encryptionStatus' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'kmsKeyVersion' + description: | + Output only. The version of the Cloud KMS key specified in the parent cluster that is in use for the data underlying this table. +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: 'Batch' +description: | + Dataproc Serverless Batches lets you run Spark workloads without requiring you to + provision and manage your own Dataproc cluster. 
+references: + guides: + 'Dataproc Serverless Batches Intro': 'https://cloud.google.com/dataproc-serverless/docs/overview' + api: 'https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/batches/{{batch_id}}' +base_url: 'projects/{{project}}/locations/{{location}}/batches' +self_link: 'projects/{{project}}/locations/{{location}}/batches/{{batch_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/batches?batchId={{batch_id}}' +delete_url: 'projects/{{project}}/locations/{{location}}/batches/{{batch_id}}' +immutable: true +import_format: + - 'projects/{{project}}/locations/{{location}}/batches/{{batch_id}}' +timeouts: + insert_minutes: 10 + update_minutes: 20 + delete_minutes: 5 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + result: + resource_inside_response: false +collection_url_key: 'batches' +custom_code: + constants: 'templates/terraform/constants/cloud_dataproc_batch.go.tmpl' + decoder: 'templates/terraform/decoders/cloud_dataproc_batch.go.tmpl' +examples: + - name: 'dataproc_batch_spark' + primary_resource_id: 'example_batch_spark' + primary_resource_name: 'fmt.Sprintf("tf-test-spark-batch%s", context["random_suffix"])' + vars: + subnetwork_name: 'default' + prevent_destroy: 'true' + test_env_vars: + project_name: 'PROJECT_NAME' + test_vars_overrides: + 'subnetwork_name': 'acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-spark-test-network", "dataproc-spark-test-subnetwork")' + 'prevent_destroy': 'false' + ignore_read_extra: + - 'runtime_config.0.properties' + - name: 'dataproc_batch_spark_full' + primary_resource_id: 'example_batch_spark' + primary_resource_name: 'fmt.Sprintf("tf-test-spark-batch%s", context["random_suffix"])' + vars: + dataproc_batch: 'dataproc-batch' + prevent_destroy: 'true' + key_name: 'example-key' + keyring_name: 
'example-keyring' + bucket_name: 'dataproc-bucket' + test_env_vars: + project_name: 'PROJECT_NAME' + test_vars_overrides: + 'prevent_destroy': 'false' + ignore_read_extra: + - 'runtime_config.0.properties' + - name: 'dataproc_batch_sparksql' + primary_resource_id: 'example_batch_sparsql' + primary_resource_name: 'fmt.Sprintf("tf-test-spark-batch%s", context["random_suffix"])' + vars: + subnetwork_name: 'default' + prevent_destroy: 'true' + test_env_vars: + project_name: 'PROJECT_NAME' + test_vars_overrides: + 'subnetwork_name': 'acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-sparksql-test-network", "dataproc-sparksql-test-subnetwork")' + 'prevent_destroy': 'false' + ignore_read_extra: + - 'runtime_config.0.properties' + - name: 'dataproc_batch_pyspark' + primary_resource_id: 'example_batch_pyspark' + primary_resource_name: 'fmt.Sprintf("tf-test-spark-batch%s", context["random_suffix"])' + vars: + subnetwork_name: 'default' + prevent_destroy: 'true' + test_env_vars: + project_name: 'PROJECT_NAME' + test_vars_overrides: + 'subnetwork_name': 'acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-pyspark-test-network", "dataproc-pyspark-test-subnetwork")' + 'prevent_destroy': 'false' + ignore_read_extra: + - 'runtime_config.0.properties' + - name: 'dataproc_batch_sparkr' + primary_resource_id: 'example_batch_sparkr' + primary_resource_name: 'fmt.Sprintf("tf-test-spark-batch%s", context["random_suffix"])' + vars: + subnetwork_name: 'default' + prevent_destroy: 'true' + test_env_vars: + project_name: 'PROJECT_NAME' + test_vars_overrides: + 'subnetwork_name': 'acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-pyspark-test-network", "dataproc-pyspark-test-subnetwork")' + 'prevent_destroy': 'false' + ignore_read_extra: + - 'runtime_config.0.properties' +parameters: + - name: 'location' + type: String + description: | + The location in which the batch will be created in. 
+ url_param_only: true + immutable: true + - name: 'batchId' + type: String + description: | + The ID to use for the batch, which will become the final component of the batch's resource name. + This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. + url_param_only: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of the batch. + output: true + - name: 'uuid' + type: String + description: | + A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch. + output: true + - name: 'createTime' + type: String + description: | + The time when the batch was created. + output: true + - name: 'runtimeInfo' + type: NestedObject + description: 'Runtime information about batch execution.' + output: true + properties: + - name: 'outputUri' + type: String + description: | + A URI pointing to the location of the stdout and stderr of the workload. + output: true + - name: 'diagnosticOutputUri' + type: String + description: | + A URI pointing to the location of the diagnostics tarball. + output: true + - name: 'endpoints' + type: KeyValuePairs + description: | + Map of remote access endpoints (such as web interfaces and APIs) to their URIs. 
+ output: true + - name: 'approximateUsage' + type: NestedObject + description: | + Approximate workload resource usage, calculated when the workload completes(see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing)) + output: true + properties: + - name: 'milliDcuSeconds' + type: String + description: | + DCU (Dataproc Compute Units) usage in (milliDCU x seconds) + output: true + - name: 'shuffleStorageGbSeconds' + type: String + description: | + Shuffle storage usage in (GB x seconds) + output: true + - name: 'milliAcceleratorSeconds' + type: String + description: | + Accelerator usage in (milliAccelerator x seconds) + output: true + - name: 'acceleratorType' + type: String + description: | + Accelerator type being used, if any + output: true + - name: 'currentUsage' + type: NestedObject + description: | + Snapshot of current workload resource usage(see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing)) + output: true + properties: + - name: 'milliDcu' + type: String + description: | + Milli (one-thousandth) Dataproc Compute Units (DCUs). + output: true + - name: 'shuffleStorageGb' + type: String + description: | + Shuffle Storage in gigabytes (GB). + output: true + - name: 'milliDcuPremium' + type: String + description: | + Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier. + output: true + - name: 'shuffleStorageGbPremium' + type: String + description: | + Shuffle Storage in gigabytes (GB) charged at premium tier. + output: true + - name: 'milliAccelerator' + type: String + description: | + Milli (one-thousandth) accelerator.. + output: true + - name: 'acceleratorType' + type: String + description: | + Accelerator type being used, if any. + output: true + - name: 'snapshotTime' + type: String + description: | + The timestamp of the usage snapshot. + output: true + - name: 'state' + type: String + description: | + The state of the batch. 
For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State). + output: true + - name: 'stateMessage' + type: String + description: | + Batch state details, such as a failure description if the state is FAILED. + output: true + - name: 'stateTime' + type: String + description: | + The time when the batch entered the current state. + output: true + - name: 'creator' + type: String + description: | + The email address of the user who created the batch. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + The labels to associate with this batch. + - name: 'runtimeConfig' + type: NestedObject + description: | + Runtime configuration for the batch execution. + properties: + - name: 'version' + type: String + description: | + Version of the batch runtime. + default_from_api: true + diff_suppress_func: 'CloudDataprocBatchRuntimeConfigVersionDiffSuppress' + - name: 'containerImage' + type: String + description: | + Optional custom container image for the job runtime environment. If not specified, a default container image will be used. + - name: 'properties' + type: KeyValuePairs + description: | + A mapping of property names to values, which are used to configure workload execution. + - name: 'effective_properties' + type: KeyValuePairs + description: | + A mapping of property names to values, which are used to configure workload execution. + output: true + - name: 'environmentConfig' + type: NestedObject + description: | + Environment configuration for the batch execution. + properties: + - name: 'executionConfig' + type: NestedObject + description: | + Execution configuration for a workload. + properties: + - name: 'serviceAccount' + type: String + description: | + Service account that is used to execute the workload. + default_from_api: true + - name: 'networkTags' + type: Array + description: | + Tags used for network traffic control. 
+ item_type: + type: String + - name: 'kmsKey' + type: String + description: | + The Cloud KMS key to use for encryption. + - name: 'ttl' + type: String + description: | + The duration after which the workload will be terminated. + When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing + work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it + exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, + it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. + Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), + the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or + when ttl has been exceeded, whichever occurs first. + default_from_api: true + - name: 'stagingBucket' + type: String + description: | + A Cloud Storage bucket used to stage workload dependencies, config files, and store + workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, + Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, + and then create and manage project-level, per-location staging and temporary buckets. + This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - name: 'networkUri' + type: String + description: | + Network configuration for workload execution. + conflicts: + - environment_config.0.execution_config.0.subnetwork_uri + - name: 'subnetworkUri' + type: String + description: | + Subnetwork configuration for workload execution. 
+ conflicts: + - environment_config.0.execution_config.0.network_uri + - name: 'peripheralsConfig' + type: NestedObject + description: | + Peripherals configuration that workload has access to. + default_from_api: true + allow_empty_object: true + properties: + - name: 'metastoreService' + type: String + description: | + Resource name of an existing Dataproc Metastore service. + - name: 'sparkHistoryServerConfig' + type: NestedObject + description: | + The Spark History Server configuration for the workload. + properties: + - name: 'dataprocCluster' + type: String + description: | + Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. + - name: 'operation' + type: String + description: | + The resource name of the operation associated with this batch. + output: true + - name: 'stateHistory' + type: Array + description: | + Historical state information for the batch. + output: true + item_type: + type: NestedObject + properties: + - name: 'state' + type: String + description: | + The state of the batch at this point in history. For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State). + output: true + - name: 'stateMessage' + type: String + description: | + Details about the state at this point in history. + output: true + - name: 'stateStartTime' + type: String + description: | + The time when the batch entered the historical state. + output: true + - name: 'pysparkBatch' + type: NestedObject + description: | + PySpark batch config. + exactly_one_of: + - 'pyspark_batch' + - 'spark_batch' + - 'spark_sql_batch' + - 'spark_r_batch' + properties: + - name: 'mainPythonFileUri' + type: String + description: | + The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file. + - name: 'args' + type: Array + description: | + The arguments to pass to the driver. 
Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + item_type: + type: String + - name: 'pythonFileUris' + type: Array + description: | + HCFS file URIs of Python files to pass to the PySpark framework. + Supported file types: .py, .egg, and .zip. + item_type: + type: String + - name: 'jarFileUris' + type: Array + description: | + HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + item_type: + type: String + - name: 'fileUris' + type: Array + description: | + HCFS URIs of files to be placed in the working directory of each executor. + item_type: + type: String + - name: 'archiveUris' + type: Array + description: | + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: + type: String + - name: 'sparkBatch' + type: NestedObject + description: | + Spark batch config. + exactly_one_of: + - 'pyspark_batch' + - 'spark_batch' + - 'spark_sql_batch' + - 'spark_r_batch' + properties: + - name: 'args' + type: Array + description: | + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + item_type: + type: String + - name: 'jarFileUris' + type: Array + description: | + HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + item_type: + type: String + - name: 'fileUris' + type: Array + description: | + HCFS URIs of files to be placed in the working directory of each executor. + item_type: + type: String + - name: 'archiveUris' + type: Array + description: | + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. 
+ item_type: + type: String + - name: 'mainJarFileUri' + type: String + description: | + The HCFS URI of the jar file that contains the main class. + exactly_one_of: + - 'spark_batch.0.main_class' + - name: 'mainClass' + type: String + description: | + The name of the driver main class. The jar file that contains the class must be in the + classpath or specified in jarFileUris. + exactly_one_of: + - 'spark_batch.0.main_jar_file_uri' + - name: 'sparkRBatch' + type: NestedObject + description: | + SparkR batch config. + exactly_one_of: + - 'pyspark_batch' + - 'spark_batch' + - 'spark_sql_batch' + - 'spark_r_batch' + properties: + - name: 'mainRFileUri' + type: String + description: | + The HCFS URI of the main R file to use as the driver. Must be a .R or .r file. + - name: 'args' + type: Array + description: | + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + item_type: + type: String + - name: 'fileUris' + type: Array + description: | + HCFS URIs of files to be placed in the working directory of each executor. + item_type: + type: String + - name: 'archiveUris' + type: Array + description: | + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: + type: String + - name: 'sparkSqlBatch' + type: NestedObject + description: | + Spark SQL batch config. + exactly_one_of: + - 'pyspark_batch' + - 'spark_batch' + - 'spark_sql_batch' + - 'spark_r_batch' + properties: + - name: 'queryFileUri' + type: String + description: | + The HCFS URI of the script that contains Spark SQL queries to execute. + - name: 'jarFileUris' + type: Array + description: | + HCFS URIs of jar files to be added to the Spark CLASSPATH. 
+ item_type: + type: String + - name: 'queryVariables' + type: KeyValuePairs + description: | + Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). diff --git a/mmv1/products/bigtableadmin/inspec.yaml b/mmv1/products/bigtableadmin/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/bigtableadmin/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/cloudkms/api.yaml b/mmv1/products/cloudkms/api.yaml new file mode 100644 index 000000000..7c27fc6ba --- /dev/null +++ b/mmv1/products/cloudkms/api.yaml @@ -0,0 +1,54 @@ + +--- !ruby/object:Api::Product +name: cloudkms +display_name: cloudkms +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://cloudkms.googleapis.com//v1/ +scopes: + - https://cloudkms.googleapis.com//auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://cloudkms.googleapis.com/ + url: https://console.cloud.google.com/apis/library/cloudkms.googleapis.com/ +objects: + + - !ruby/object:Api::Resource + name: ProjectLocationKeyRing + base_url: '{{parent}}/keyRings' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/cloudkms/docs' + 
async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A KeyRing is a toplevel logical grouping of CryptoKeys. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name for the KeyRing in the format `projects/*/locations/*/keyRings/*`. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time at which this KeyRing was created. + diff --git a/mmv1/products/cloudkms/inspec.yaml b/mmv1/products/cloudkms/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/cloudkms/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/cloudscheduler/api.yaml b/mmv1/products/cloudscheduler/api.yaml index 1bebf6b4d..aea4f3772 100644 --- a/mmv1/products/cloudscheduler/api.yaml +++ b/mmv1/products/cloudscheduler/api.yaml @@ -71,15 +71,14 @@ objects: The value of this field must be a time zone name from the tz database. required: false default_value: 'Etc/UTC' - - !ruby/object:Api::Type::String + - !ruby/object:Api::Type::Enum name: state + default_value: :ENABLED description: | - State of the job. - output: true - - !ruby/object:Api::Type::Boolean - name: paused - description: | - Sets the job to a paused state. Jobs default to being enabled when this property is not set. + State of the job. + values: + - :ENABLED + - :PAUSED - !ruby/object:Api::Type::String name: attemptDeadline description: | diff --git a/mmv1/products/cloudscheduler/terraform.yaml b/mmv1/products/cloudscheduler/terraform.yaml index dda48349d..f5065c40a 100644 --- a/mmv1/products/cloudscheduler/terraform.yaml +++ b/mmv1/products/cloudscheduler/terraform.yaml @@ -34,11 +34,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides primary_resource_id: "job" vars: job_name: "test-job" - - !ruby/object:Provider::Terraform::Examples - name: "scheduler_job_paused" - primary_resource_id: "job" - vars: - job_name: "test-job" - !ruby/object:Provider::Terraform::Examples name: "scheduler_job_app_engine" primary_resource_id: "job" @@ -61,10 +56,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides name: !ruby/object:Overrides::Terraform::PropertyOverride custom_expand: 'templates/terraform/custom_expand/shortname_to_url.go.erb' custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' - paused: !ruby/object:Overrides::Terraform::PropertyOverride - required: false - default_from_api: true - custom_flatten: templates/terraform/custom_flatten/cloud_scheduler_paused.go.erb httpTarget.body: 
!ruby/object:Overrides::Terraform::PropertyOverride validation: !ruby/object:Provider::Terraform::Validation function: 'validateBase64String' diff --git a/mmv1/products/composer/api.yaml b/mmv1/products/composer/api.yaml new file mode 100644 index 000000000..82be9467c --- /dev/null +++ b/mmv1/products/composer/api.yaml @@ -0,0 +1,2064 @@ +--- !ruby/object:Api::Product +name: composer +display_name: composer +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://composer.googleapis.com//v1/ +scopes: + - https://composer.googleapis.com//auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://composer.googleapis.com/ + url: https://console.cloud.google.com/apis/library/composer.googleapis.com/ +objects: + + - !ruby/object:Api::Resource + name: ProjectLocationImageVersion + base_url: '{{parent}}/imageVersions' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/composer/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The ImageVersions in a project and location. + properties: + + - !ruby/object:Api::Type::Array + name: 'imageVersions' + description: | + The list of supported ImageVersions in a location. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'nextPageToken' + description: | + The page token used to query for the next page if one exists. 
+ + + + - !ruby/object:Api::Resource + name: ProjectLocationEnvironment + base_url: '{{parent}}/environments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/composer/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An environment for running orchestration tasks. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the environment, in the form: "projects/{projectId}/locations/{locationId}/environments/{environmentId}" EnvironmentId must start with a lowercase letter followed by up to 63 lowercase letters, numbers, or hyphens, and cannot end with a hyphen. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Configuration information for an environment. + properties: + - !ruby/object:Api::Type::String + name: 'gkeCluster' + description: | + Output only. The Kubernetes Engine cluster used to run this environment. + - !ruby/object:Api::Type::String + name: 'dagGcsPrefix' + description: | + Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. + - !ruby/object:Api::Type::Integer + name: 'nodeCount' + description: | + The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and configuration of software inside the environment. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression `composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?)`. When used as input, the server also checks if the provided version is supported and denies the request for an unsupported version. The Cloud Composer portion of the image version is a full [semantic version](https://semver.org), or an alias in the form of major version number or `latest`. When an alias is provided, the server replaces it with the current Cloud Composer version that satisfies the alias. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. When an alias is provided, the server replaces it with the latest Apache Airflow version that satisfies the alias and is supported in the given Cloud Composer version. In all cases, the resolved image version is stored in the same field. See also [version list](/composer/docs/concepts/versioning/composer-versions) and [versioning overview](/composer/docs/concepts/versioning/composer-versioning-overview). + - !ruby/object:Api::Type::NestedObject + name: 'airflowConfigOverrides' + description: | + Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". 
Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pypiPackages' + description: | + Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'envVariables' + description: | + Optional. Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression `a-zA-Z_*`. 
They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: * `AIRFLOW_HOME` * `C_FORCE_ROOT` * `CONTAINER_NAME` * `DAGS_FOLDER` * `GCP_PROJECT` * `GCS_BUCKET` * `GKE_CLUSTER_NAME` * `SQL_DATABASE` * `SQL_INSTANCE` * `SQL_PASSWORD` * `SQL_PROJECT` * `SQL_REGION` * `SQL_USER` + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'pythonVersion' + description: | + Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. + - !ruby/object:Api::Type::Integer + name: 'schedulerCount' + description: | + Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'nodeConfig' + description: | + The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. 
If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). 
For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. + - !ruby/object:Api::Type::Integer + name: 'diskSizeGb' + description: | + Optional. The disk size in GB used for node VMs. Minimum size is 30GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::Array + name: 'oauthScopes' + description: | + Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + Optional. The list of instance tags applied to all node VMs. 
Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'ipAllocationPolicy' + description: | + Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::Boolean + name: 'useIpAliases' + description: | + Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. + - !ruby/object:Api::Type::String + name: 'clusterSecondaryRangeName' + description: | + Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'clusterIpv4CidrBlock' + description: | + Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::String + name: 'servicesSecondaryRangeName' + description: | + Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. 
For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'servicesIpv4CidrBlock' + description: | + Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::Boolean + name: 'enableIpMasqAgent' + description: | + Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent + - !ruby/object:Api::Type::NestedObject + name: 'privateEnvironmentConfig' + description: | + The configuration information for configuring a Private IP Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEnvironment' + description: | + Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'privateClusterConfig' + description: | + Configuration options for the private GKE cluster in a Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEndpoint' + description: | + Optional. 
If `true`, access to the public endpoint of the GKE cluster is denied. + - !ruby/object:Api::Type::String + name: 'masterIpv4CidrBlock' + description: | + Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. + - !ruby/object:Api::Type::String + name: 'masterIpv4ReservedRange' + description: | + Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. + - !ruby/object:Api::Type::String + name: 'webServerIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudSqlIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'webServerIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. 
+ - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivatelyUsedPublicIps' + description: | + Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'cloudComposerConnectionSubnetwork' + description: | + Optional. When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. + - !ruby/object:Api::Type::NestedObject + name: 'networkingConfig' + description: | + Configuration options for networking connections in the Composer 2 environment. + properties: + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. + values: + - :CONNECTION_TYPE_UNSPECIFIED + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT + - !ruby/object:Api::Type::NestedObject + name: 'webServerNetworkAccessControl' + description: | + Network-level access control policy for the Airflow web server. + properties: + - !ruby/object:Api::Type::Array + name: 'allowedIpRanges' + description: | + A collection of allowed IP ranges with descriptions. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + IP address or range, defined using CIDR notation, of requests that this rule applies to. 
Examples: `192.168.1.1` or `192.168.0.0/16` or `2001:db8::/32` or `2001:0db8:0000:0042:0000:8a2e:0370:7334`. IP range prefixes should be properly truncated. For example, `1.2.3.4/24` should be truncated to `1.2.3.0/24`. Similarly, for IPv6, `2001:db8::1/32` should be truncated to `2001:db8::/32`. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. User-provided description. It must contain at most 300 characters. + - !ruby/object:Api::Type::NestedObject + name: 'databaseConfig' + description: | + The configuration of Cloud SQL instance that is used by the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Optional. The Compute Engine zone where the Airflow database is created. If zone is provided, it must be in the region selected for the environment. If zone is not provided, a zone is automatically selected. The zone can only be set during environment creation. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'webServerConfig' + description: | + The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. 
Value custom is returned only in response, if Airflow web server parameters were manually changed to non-standard values. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + The configuration settings for Cloud Composer maintenance window. The following example: ``` { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } ``` would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. Start time of the first recurrence of the maintenance window. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end-time must be in the future, relative to `start_time`. + - !ruby/object:Api::Type::String + name: 'recurrence' + description: | + Required. Maintenance window recurrence. Format is a subset of [RFC-5545](https://tools.ietf.org/html/rfc5545) `RRULE`. The only allowed values for `FREQ` field are `FREQ=DAILY` and `FREQ=WEEKLY;BYDAY=...` Example values: `FREQ=WEEKLY;BYDAY=TU,WE`, `FREQ=DAILY`. + - !ruby/object:Api::Type::NestedObject + name: 'workloadsConfig' + description: | + The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. 
Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduler' + description: | + Configuration for resources used by Airflow schedulers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of schedulers. + - !ruby/object:Api::Type::NestedObject + name: 'webServer' + description: | + Configuration for resources used by Airflow web server. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::NestedObject + name: 'worker' + description: | + Configuration for resources used by Airflow workers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow worker replica. 
+ - !ruby/object:Api::Type::Integer + name: 'minCount' + description: | + Optional. Minimum number of workers for autoscaling. + - !ruby/object:Api::Type::Integer + name: 'maxCount' + description: | + Optional. Maximum number of workers for autoscaling. + - !ruby/object:Api::Type::NestedObject + name: 'triggerer' + description: | + Configuration for resources used by Airflow triggerers. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of triggerers. + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Enum + name: 'environmentSize' + description: | + Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + values: + - :ENVIRONMENT_SIZE_UNSPECIFIED + - :ENVIRONMENT_SIZE_SMALL + - :ENVIRONMENT_SIZE_MEDIUM + - :ENVIRONMENT_SIZE_LARGE + - !ruby/object:Api::Type::String + name: 'airflowUri' + description: | + Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). + - !ruby/object:Api::Type::String + name: 'airflowByoidUri' + description: | + Output only. The 'bring your own identity' variant of the URI of the Apache Airflow Web UI hosted within this environment, to be accessed with external identities using workforce identity federation (see [Access environments with workforce identity federation](/composer/docs/composer-2/access-environments-with-workforce-identity-federation)). 
+ - !ruby/object:Api::Type::NestedObject + name: 'masterAuthorizedNetworksConfig' + description: | + Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Whether or not master authorized networks feature is enabled. + - !ruby/object:Api::Type::Array + name: 'cidrBlocks' + description: | + Up to 50 external networks that could access Kubernetes master through HTTPS. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User-defined name that identifies the CIDR block. + - !ruby/object:Api::Type::String + name: 'cidrBlock' + description: | + CIDR block that must be specified in CIDR notation. + - !ruby/object:Api::Type::NestedObject + name: 'recoveryConfig' + description: | + The Recovery settings of an environment. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduledSnapshotsConfig' + description: | + The configuration for scheduled snapshot creation mechanism. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Whether scheduled snapshots creation is enabled. + - !ruby/object:Api::Type::String + name: 'snapshotLocation' + description: | + Optional. The Cloud Storage location for storing automatically created snapshots. + - !ruby/object:Api::Type::String + name: 'snapshotCreationSchedule' + description: | + Optional. The cron expression representing the time when snapshots creation mechanism runs. This field is subject to additional validation around frequency of execution. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. 
Time zone that sets the context to interpret snapshot_creation_schedule. + - !ruby/object:Api::Type::Enum + name: 'resilienceMode' + description: | + Optional. Resilience mode of the Cloud Composer Environment. This field is supported for Cloud Composer environments in versions composer-2.2.0-airflow-*.*.* and newer. + values: + - :RESILIENCE_MODE_UNSPECIFIED + - :HIGH_RESILIENCE + - !ruby/object:Api::Type::String + name: 'uuid' + description: | + Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The current state of the environment. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :RUNNING + - :UPDATING + - :DELETING + - :ERROR + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time at which this environment was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time at which this environment was last modified. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. + - !ruby/object:Api::Type::NestedObject + name: 'storageConfig' + description: | + The configuration for data storage in the environment. + properties: + - !ruby/object:Api::Type::String + name: 'bucket' + description: | + Optional. 
The name of the Cloud Storage bucket used by the environment. No `gs://` prefix. + + + + + - !ruby/object:Api::Resource + name: ProjectLocationEnvironment + base_url: '{{parent}}/environments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/composer/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An environment for running orchestration tasks. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the environment, in the form: "projects/{projectId}/locations/{locationId}/environments/{environmentId}" EnvironmentId must start with a lowercase letter followed by up to 63 lowercase letters, numbers, or hyphens, and cannot end with a hyphen. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Configuration information for an environment. + properties: + - !ruby/object:Api::Type::String + name: 'gkeCluster' + description: | + Output only. The Kubernetes Engine cluster used to run this environment. + - !ruby/object:Api::Type::String + name: 'dagGcsPrefix' + description: | + Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. 
+ - !ruby/object:Api::Type::Integer + name: 'nodeCount' + description: | + The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and configuration of software inside the environment. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression `composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?)`. When used as input, the server also checks if the provided version is supported and denies the request for an unsupported version. The Cloud Composer portion of the image version is a full [semantic version](https://semver.org), or an alias in the form of major version number or `latest`. When an alias is provided, the server replaces it with the current Cloud Composer version that satisfies the alias. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. When an alias is provided, the server replaces it with the latest Apache Airflow version that satisfies the alias and is supported in the given Cloud Composer version. In all cases, the resolved image version is stored in the same field. See also [version list](/composer/docs/concepts/versioning/composer-versions) and [versioning overview](/composer/docs/concepts/versioning/composer-versioning-overview). + - !ruby/object:Api::Type::NestedObject + name: 'airflowConfigOverrides' + description: | + Optional. 
Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pypiPackages' + description: | + Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'envVariables' + description: | + Optional. Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression `a-zA-Z_*`. 
They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: * `AIRFLOW_HOME` * `C_FORCE_ROOT` * `CONTAINER_NAME` * `DAGS_FOLDER` * `GCP_PROJECT` * `GCS_BUCKET` * `GKE_CLUSTER_NAME` * `SQL_DATABASE` * `SQL_INSTANCE` * `SQL_PASSWORD` * `SQL_PROJECT` * `SQL_REGION` * `SQL_USER` + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'pythonVersion' + description: | + Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. + - !ruby/object:Api::Type::Integer + name: 'schedulerCount' + description: | + Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'nodeConfig' + description: | + The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. 
If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). 
For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. + - !ruby/object:Api::Type::Integer + name: 'diskSizeGb' + description: | + Optional. The disk size in GB used for node VMs. Minimum size is 30GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::Array + name: 'oauthScopes' + description: | + Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + Optional. The list of instance tags applied to all node VMs. 
Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'ipAllocationPolicy' + description: | + Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::Boolean + name: 'useIpAliases' + description: | + Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. + - !ruby/object:Api::Type::String + name: 'clusterSecondaryRangeName' + description: | + Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'clusterIpv4CidrBlock' + description: | + Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::String + name: 'servicesSecondaryRangeName' + description: | + Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. 
For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'servicesIpv4CidrBlock' + description: | + Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::Boolean + name: 'enableIpMasqAgent' + description: | + Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent + - !ruby/object:Api::Type::NestedObject + name: 'privateEnvironmentConfig' + description: | + The configuration information for configuring a Private IP Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEnvironment' + description: | + Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'privateClusterConfig' + description: | + Configuration options for the private GKE cluster in a Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEndpoint' + description: | + Optional. 
If `true`, access to the public endpoint of the GKE cluster is denied. + - !ruby/object:Api::Type::String + name: 'masterIpv4CidrBlock' + description: | + Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. + - !ruby/object:Api::Type::String + name: 'masterIpv4ReservedRange' + description: | + Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. + - !ruby/object:Api::Type::String + name: 'webServerIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudSqlIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'webServerIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. 
+ - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivatelyUsedPublicIps' + description: | + Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'cloudComposerConnectionSubnetwork' + description: | + Optional. When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. + - !ruby/object:Api::Type::NestedObject + name: 'networkingConfig' + description: | + Configuration options for networking connections in the Composer 2 environment. + properties: + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. + values: + - :CONNECTION_TYPE_UNSPECIFIED + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT + - !ruby/object:Api::Type::NestedObject + name: 'webServerNetworkAccessControl' + description: | + Network-level access control policy for the Airflow web server. + properties: + - !ruby/object:Api::Type::Array + name: 'allowedIpRanges' + description: | + A collection of allowed IP ranges with descriptions. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + IP address or range, defined using CIDR notation, of requests that this rule applies to. 
Examples: `192.168.1.1` or `192.168.0.0/16` or `2001:db8::/32` or `2001:0db8:0000:0042:0000:8a2e:0370:7334`. IP range prefixes should be properly truncated. For example, `1.2.3.4/24` should be truncated to `1.2.3.0/24`. Similarly, for IPv6, `2001:db8::1/32` should be truncated to `2001:db8::/32`. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. User-provided description. It must contain at most 300 characters. + - !ruby/object:Api::Type::NestedObject + name: 'databaseConfig' + description: | + The configuration of Cloud SQL instance that is used by the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Optional. The Compute Engine zone where the Airflow database is created. If zone is provided, it must be in the region selected for the environment. If zone is not provided, a zone is automatically selected. The zone can only be set during environment creation. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'webServerConfig' + description: | + The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. 
Value custom is returned only in response, if Airflow web server parameters were manually changed to non-standard values. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + The configuration settings for Cloud Composer maintenance window. The following example: ``` { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } ``` would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. Start time of the first recurrence of the maintenance window. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end-time must be in the future, relative to `start_time`. + - !ruby/object:Api::Type::String + name: 'recurrence' + description: | + Required. Maintenance window recurrence. Format is a subset of [RFC-5545](https://tools.ietf.org/html/rfc5545) `RRULE`. The only allowed values for `FREQ` field are `FREQ=DAILY` and `FREQ=WEEKLY;BYDAY=...` Example values: `FREQ=WEEKLY;BYDAY=TU,WE`, `FREQ=DAILY`. + - !ruby/object:Api::Type::NestedObject + name: 'workloadsConfig' + description: | + The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. 
Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduler' + description: | + Configuration for resources used by Airflow schedulers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of schedulers. + - !ruby/object:Api::Type::NestedObject + name: 'webServer' + description: | + Configuration for resources used by Airflow web server. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::NestedObject + name: 'worker' + description: | + Configuration for resources used by Airflow workers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow worker replica. 
+ - !ruby/object:Api::Type::Integer + name: 'minCount' + description: | + Optional. Minimum number of workers for autoscaling. + - !ruby/object:Api::Type::Integer + name: 'maxCount' + description: | + Optional. Maximum number of workers for autoscaling. + - !ruby/object:Api::Type::NestedObject + name: 'triggerer' + description: | + Configuration for resources used by Airflow triggerers. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of triggerers. + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Enum + name: 'environmentSize' + description: | + Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + values: + - :ENVIRONMENT_SIZE_UNSPECIFIED + - :ENVIRONMENT_SIZE_SMALL + - :ENVIRONMENT_SIZE_MEDIUM + - :ENVIRONMENT_SIZE_LARGE + - !ruby/object:Api::Type::String + name: 'airflowUri' + description: | + Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). + - !ruby/object:Api::Type::String + name: 'airflowByoidUri' + description: | + Output only. The 'bring your own identity' variant of the URI of the Apache Airflow Web UI hosted within this environment, to be accessed with external identities using workforce identity federation (see [Access environments with workforce identity federation](/composer/docs/composer-2/access-environments-with-workforce-identity-federation)). 
+ - !ruby/object:Api::Type::NestedObject + name: 'masterAuthorizedNetworksConfig' + description: | + Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Whether or not master authorized networks feature is enabled. + - !ruby/object:Api::Type::Array + name: 'cidrBlocks' + description: | + Up to 50 external networks that could access Kubernetes master through HTTPS. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User-defined name that identifies the CIDR block. + - !ruby/object:Api::Type::String + name: 'cidrBlock' + description: | + CIDR block that must be specified in CIDR notation. + - !ruby/object:Api::Type::NestedObject + name: 'recoveryConfig' + description: | + The Recovery settings of an environment. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduledSnapshotsConfig' + description: | + The configuration for scheduled snapshot creation mechanism. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Whether scheduled snapshots creation is enabled. + - !ruby/object:Api::Type::String + name: 'snapshotLocation' + description: | + Optional. The Cloud Storage location for storing automatically created snapshots. + - !ruby/object:Api::Type::String + name: 'snapshotCreationSchedule' + description: | + Optional. The cron expression representing the time when snapshots creation mechanism runs. This field is subject to additional validation around frequency of execution. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. 
Time zone that sets the context to interpret snapshot_creation_schedule. + - !ruby/object:Api::Type::Enum + name: 'resilienceMode' + description: | + Optional. Resilience mode of the Cloud Composer Environment. This field is supported for Cloud Composer environments in versions composer-2.2.0-airflow-*.*.* and newer. + values: + - :RESILIENCE_MODE_UNSPECIFIED + - :HIGH_RESILIENCE + - !ruby/object:Api::Type::String + name: 'uuid' + description: | + Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The current state of the environment. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :RUNNING + - :UPDATING + - :DELETING + - :ERROR + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time at which this environment was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time at which this environment was last modified. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. + - !ruby/object:Api::Type::NestedObject + name: 'storageConfig' + description: | + The configuration for data storage in the environment. + properties: + - !ruby/object:Api::Type::String + name: 'bucket' + description: | + Optional. 
The name of the Cloud Storage bucket used by the environment. No `gs://` prefix. + + + + + - !ruby/object:Api::Resource + name: ProjectLocationEnvironment + base_url: '{{parent}}/environments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/composer/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An environment for running orchestration tasks. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the environment, in the form: "projects/{projectId}/locations/{locationId}/environments/{environmentId}" EnvironmentId must start with a lowercase letter followed by up to 63 lowercase letters, numbers, or hyphens, and cannot end with a hyphen. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Configuration information for an environment. + properties: + - !ruby/object:Api::Type::String + name: 'gkeCluster' + description: | + Output only. The Kubernetes Engine cluster used to run this environment. + - !ruby/object:Api::Type::String + name: 'dagGcsPrefix' + description: | + Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. 
+ - !ruby/object:Api::Type::Integer + name: 'nodeCount' + description: | + The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and configuration of software inside the environment. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression `composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?)`. When used as input, the server also checks if the provided version is supported and denies the request for an unsupported version. The Cloud Composer portion of the image version is a full [semantic version](https://semver.org), or an alias in the form of major version number or `latest`. When an alias is provided, the server replaces it with the current Cloud Composer version that satisfies the alias. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. When an alias is provided, the server replaces it with the latest Apache Airflow version that satisfies the alias and is supported in the given Cloud Composer version. In all cases, the resolved image version is stored in the same field. See also [version list](/composer/docs/concepts/versioning/composer-versions) and [versioning overview](/composer/docs/concepts/versioning/composer-versioning-overview). + - !ruby/object:Api::Type::NestedObject + name: 'airflowConfigOverrides' + description: | + Optional. 
Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pypiPackages' + description: | + Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'envVariables' + description: | + Optional. Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression `a-zA-Z_*`. 
They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: * `AIRFLOW_HOME` * `C_FORCE_ROOT` * `CONTAINER_NAME` * `DAGS_FOLDER` * `GCP_PROJECT` * `GCS_BUCKET` * `GKE_CLUSTER_NAME` * `SQL_DATABASE` * `SQL_INSTANCE` * `SQL_PASSWORD` * `SQL_PROJECT` * `SQL_REGION` * `SQL_USER` + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'pythonVersion' + description: | + Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. + - !ruby/object:Api::Type::Integer + name: 'schedulerCount' + description: | + Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'nodeConfig' + description: | + The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. 
If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). 
For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. + - !ruby/object:Api::Type::Integer + name: 'diskSizeGb' + description: | + Optional. The disk size in GB used for node VMs. Minimum size is 30GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::Array + name: 'oauthScopes' + description: | + Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + Optional. The list of instance tags applied to all node VMs. 
Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'ipAllocationPolicy' + description: | + Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::Boolean + name: 'useIpAliases' + description: | + Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. + - !ruby/object:Api::Type::String + name: 'clusterSecondaryRangeName' + description: | + Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'clusterIpv4CidrBlock' + description: | + Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::String + name: 'servicesSecondaryRangeName' + description: | + Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. 
For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'servicesIpv4CidrBlock' + description: | + Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::Boolean + name: 'enableIpMasqAgent' + description: | + Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent + - !ruby/object:Api::Type::NestedObject + name: 'privateEnvironmentConfig' + description: | + The configuration information for configuring a Private IP Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEnvironment' + description: | + Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'privateClusterConfig' + description: | + Configuration options for the private GKE cluster in a Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEndpoint' + description: | + Optional. 
If `true`, access to the public endpoint of the GKE cluster is denied. + - !ruby/object:Api::Type::String + name: 'masterIpv4CidrBlock' + description: | + Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. + - !ruby/object:Api::Type::String + name: 'masterIpv4ReservedRange' + description: | + Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. + - !ruby/object:Api::Type::String + name: 'webServerIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudSqlIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'webServerIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. 
+ - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivatelyUsedPublicIps' + description: | + Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'cloudComposerConnectionSubnetwork' + description: | + Optional. When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. + - !ruby/object:Api::Type::NestedObject + name: 'networkingConfig' + description: | + Configuration options for networking connections in the Composer 2 environment. + properties: + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. + values: + - :CONNECTION_TYPE_UNSPECIFIED + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT + - !ruby/object:Api::Type::NestedObject + name: 'webServerNetworkAccessControl' + description: | + Network-level access control policy for the Airflow web server. + properties: + - !ruby/object:Api::Type::Array + name: 'allowedIpRanges' + description: | + A collection of allowed IP ranges with descriptions. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + IP address or range, defined using CIDR notation, of requests that this rule applies to. 
Examples: `192.168.1.1` or `192.168.0.0/16` or `2001:db8::/32` or `2001:0db8:0000:0042:0000:8a2e:0370:7334`. IP range prefixes should be properly truncated. For example, `1.2.3.4/24` should be truncated to `1.2.3.0/24`. Similarly, for IPv6, `2001:db8::1/32` should be truncated to `2001:db8::/32`. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. User-provided description. It must contain at most 300 characters. + - !ruby/object:Api::Type::NestedObject + name: 'databaseConfig' + description: | + The configuration of Cloud SQL instance that is used by the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Optional. The Compute Engine zone where the Airflow database is created. If zone is provided, it must be in the region selected for the environment. If zone is not provided, a zone is automatically selected. The zone can only be set during environment creation. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'webServerConfig' + description: | + The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. 
Value custom is returned only in response, if Airflow web server parameters were manually changed to non-standard values. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + The configuration settings for Cloud Composer maintenance window. The following example: ``` { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } ``` would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. Start time of the first recurrence of the maintenance window. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end-time must be in the future, relative to `start_time`. + - !ruby/object:Api::Type::String + name: 'recurrence' + description: | + Required. Maintenance window recurrence. Format is a subset of [RFC-5545](https://tools.ietf.org/html/rfc5545) `RRULE`. The only allowed values for `FREQ` field are `FREQ=DAILY` and `FREQ=WEEKLY;BYDAY=...` Example values: `FREQ=WEEKLY;BYDAY=TU,WE`, `FREQ=DAILY`. + - !ruby/object:Api::Type::NestedObject + name: 'workloadsConfig' + description: | + The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. 
Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduler' + description: | + Configuration for resources used by Airflow schedulers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of schedulers. + - !ruby/object:Api::Type::NestedObject + name: 'webServer' + description: | + Configuration for resources used by Airflow web server. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::NestedObject + name: 'worker' + description: | + Configuration for resources used by Airflow workers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow worker replica. 
+ - !ruby/object:Api::Type::Integer + name: 'minCount' + description: | + Optional. Minimum number of workers for autoscaling. + - !ruby/object:Api::Type::Integer + name: 'maxCount' + description: | + Optional. Maximum number of workers for autoscaling. + - !ruby/object:Api::Type::NestedObject + name: 'triggerer' + description: | + Configuration for resources used by Airflow triggerers. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of triggerers. + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Enum + name: 'environmentSize' + description: | + Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + values: + - :ENVIRONMENT_SIZE_UNSPECIFIED + - :ENVIRONMENT_SIZE_SMALL + - :ENVIRONMENT_SIZE_MEDIUM + - :ENVIRONMENT_SIZE_LARGE + - !ruby/object:Api::Type::String + name: 'airflowUri' + description: | + Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). + - !ruby/object:Api::Type::String + name: 'airflowByoidUri' + description: | + Output only. The 'bring your own identity' variant of the URI of the Apache Airflow Web UI hosted within this environment, to be accessed with external identities using workforce identity federation (see [Access environments with workforce identity federation](/composer/docs/composer-2/access-environments-with-workforce-identity-federation)). 
+ - !ruby/object:Api::Type::NestedObject + name: 'masterAuthorizedNetworksConfig' + description: | + Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Whether or not master authorized networks feature is enabled. + - !ruby/object:Api::Type::Array + name: 'cidrBlocks' + description: | + Up to 50 external networks that could access Kubernetes master through HTTPS. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User-defined name that identifies the CIDR block. + - !ruby/object:Api::Type::String + name: 'cidrBlock' + description: | + CIDR block that must be specified in CIDR notation. + - !ruby/object:Api::Type::NestedObject + name: 'recoveryConfig' + description: | + The Recovery settings of an environment. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduledSnapshotsConfig' + description: | + The configuration for scheduled snapshot creation mechanism. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Whether scheduled snapshots creation is enabled. + - !ruby/object:Api::Type::String + name: 'snapshotLocation' + description: | + Optional. The Cloud Storage location for storing automatically created snapshots. + - !ruby/object:Api::Type::String + name: 'snapshotCreationSchedule' + description: | + Optional. The cron expression representing the time when snapshots creation mechanism runs. This field is subject to additional validation around frequency of execution. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. 
Time zone that sets the context to interpret snapshot_creation_schedule. + - !ruby/object:Api::Type::Enum + name: 'resilienceMode' + description: | + Optional. Resilience mode of the Cloud Composer Environment. This field is supported for Cloud Composer environments in versions composer-2.2.0-airflow-*.*.* and newer. + values: + - :RESILIENCE_MODE_UNSPECIFIED + - :HIGH_RESILIENCE + - !ruby/object:Api::Type::String + name: 'uuid' + description: | + Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The current state of the environment. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :RUNNING + - :UPDATING + - :DELETING + - :ERROR + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time at which this environment was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time at which this environment was last modified. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. + - !ruby/object:Api::Type::NestedObject + name: 'storageConfig' + description: | + The configuration for data storage in the environment. + properties: + - !ruby/object:Api::Type::String + name: 'bucket' + description: | + Optional. 
The name of the Cloud Storage bucket used by the environment. No `gs://` prefix. + + + + + - !ruby/object:Api::Resource + name: ProjectLocationEnvironment + base_url: '{{parent}}/environments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/composer/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An environment for running orchestration tasks. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the environment, in the form: "projects/{projectId}/locations/{locationId}/environments/{environmentId}" EnvironmentId must start with a lowercase letter followed by up to 63 lowercase letters, numbers, or hyphens, and cannot end with a hyphen. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Configuration information for an environment. + properties: + - !ruby/object:Api::Type::String + name: 'gkeCluster' + description: | + Output only. The Kubernetes Engine cluster used to run this environment. + - !ruby/object:Api::Type::String + name: 'dagGcsPrefix' + description: | + Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. 
+ - !ruby/object:Api::Type::Integer + name: 'nodeCount' + description: | + The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and configuration of software inside the environment. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression `composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?)`. When used as input, the server also checks if the provided version is supported and denies the request for an unsupported version. The Cloud Composer portion of the image version is a full [semantic version](https://semver.org), or an alias in the form of major version number or `latest`. When an alias is provided, the server replaces it with the current Cloud Composer version that satisfies the alias. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. When an alias is provided, the server replaces it with the latest Apache Airflow version that satisfies the alias and is supported in the given Cloud Composer version. In all cases, the resolved image version is stored in the same field. See also [version list](/composer/docs/concepts/versioning/composer-versions) and [versioning overview](/composer/docs/concepts/versioning/composer-versioning-overview). + - !ruby/object:Api::Type::NestedObject + name: 'airflowConfigOverrides' + description: | + Optional. 
Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pypiPackages' + description: | + Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'envVariables' + description: | + Optional. Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression `a-zA-Z_*`. 
They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: * `AIRFLOW_HOME` * `C_FORCE_ROOT` * `CONTAINER_NAME` * `DAGS_FOLDER` * `GCP_PROJECT` * `GCS_BUCKET` * `GKE_CLUSTER_NAME` * `SQL_DATABASE` * `SQL_INSTANCE` * `SQL_PASSWORD` * `SQL_PROJECT` * `SQL_REGION` * `SQL_USER` + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'pythonVersion' + description: | + Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. + - !ruby/object:Api::Type::Integer + name: 'schedulerCount' + description: | + Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'nodeConfig' + description: | + The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. 
If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). 
For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. + - !ruby/object:Api::Type::Integer + name: 'diskSizeGb' + description: | + Optional. The disk size in GB used for node VMs. Minimum size is 30GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::Array + name: 'oauthScopes' + description: | + Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + Optional. The list of instance tags applied to all node VMs. 
Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'ipAllocationPolicy' + description: | + Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. + properties: + - !ruby/object:Api::Type::Boolean + name: 'useIpAliases' + description: | + Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. + - !ruby/object:Api::Type::String + name: 'clusterSecondaryRangeName' + description: | + Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'clusterIpv4CidrBlock' + description: | + Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::String + name: 'servicesSecondaryRangeName' + description: | + Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. 
For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + - !ruby/object:Api::Type::String + name: 'servicesIpv4CidrBlock' + description: | + Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + - !ruby/object:Api::Type::Boolean + name: 'enableIpMasqAgent' + description: | + Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent + - !ruby/object:Api::Type::NestedObject + name: 'privateEnvironmentConfig' + description: | + The configuration information for configuring a Private IP Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEnvironment' + description: | + Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'privateClusterConfig' + description: | + Configuration options for the private GKE cluster in a Cloud Composer environment. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateEndpoint' + description: | + Optional. 
If `true`, access to the public endpoint of the GKE cluster is denied. + - !ruby/object:Api::Type::String + name: 'masterIpv4CidrBlock' + description: | + Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. + - !ruby/object:Api::Type::String + name: 'masterIpv4ReservedRange' + description: | + Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. + - !ruby/object:Api::Type::String + name: 'webServerIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudSqlIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'webServerIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4CidrBlock' + description: | + Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. 
+ - !ruby/object:Api::Type::String + name: 'cloudComposerNetworkIpv4ReservedRange' + description: | + Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivatelyUsedPublicIps' + description: | + Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. + - !ruby/object:Api::Type::String + name: 'cloudComposerConnectionSubnetwork' + description: | + Optional. When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. + - !ruby/object:Api::Type::NestedObject + name: 'networkingConfig' + description: | + Configuration options for networking connections in the Composer 2 environment. + properties: + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. + values: + - :CONNECTION_TYPE_UNSPECIFIED + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT + - !ruby/object:Api::Type::NestedObject + name: 'webServerNetworkAccessControl' + description: | + Network-level access control policy for the Airflow web server. + properties: + - !ruby/object:Api::Type::Array + name: 'allowedIpRanges' + description: | + A collection of allowed IP ranges with descriptions. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + IP address or range, defined using CIDR notation, of requests that this rule applies to. 
Examples: `192.168.1.1` or `192.168.0.0/16` or `2001:db8::/32` or `2001:0db8:0000:0042:0000:8a2e:0370:7334`. IP range prefixes should be properly truncated. For example, `1.2.3.4/24` should be truncated to `1.2.3.0/24`. Similarly, for IPv6, `2001:db8::1/32` should be truncated to `2001:db8::/32`. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. User-provided description. It must contain at most 300 characters. + - !ruby/object:Api::Type::NestedObject + name: 'databaseConfig' + description: | + The configuration of Cloud SQL instance that is used by the Apache Airflow software. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Optional. The Compute Engine zone where the Airflow database is created. If zone is provided, it must be in the region selected for the environment. If zone is not provided, a zone is automatically selected. The zone can only be set during environment creation. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.*. + - !ruby/object:Api::Type::NestedObject + name: 'webServerConfig' + description: | + The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. 
Value custom is returned only in response, if Airflow web server parameters were manually changed to non-standard values. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + The configuration settings for Cloud Composer maintenance window. The following example: ``` { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } ``` would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. Start time of the first recurrence of the maintenance window. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end-time must be in the future, relative to `start_time`. + - !ruby/object:Api::Type::String + name: 'recurrence' + description: | + Required. Maintenance window recurrence. Format is a subset of [RFC-5545](https://tools.ietf.org/html/rfc5545) `RRULE`. The only allowed values for `FREQ` field are `FREQ=DAILY` and `FREQ=WEEKLY;BYDAY=...` Example values: `FREQ=WEEKLY;BYDAY=TU,WE`, `FREQ=DAILY`. + - !ruby/object:Api::Type::NestedObject + name: 'workloadsConfig' + description: | + The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. 
Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduler' + description: | + Configuration for resources used by Airflow schedulers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of schedulers. + - !ruby/object:Api::Type::NestedObject + name: 'webServer' + description: | + Configuration for resources used by Airflow web server. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for Airflow web server. + - !ruby/object:Api::Type::NestedObject + name: 'worker' + description: | + Configuration for resources used by Airflow workers. + properties: + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow worker replica. + - !ruby/object:Api::Type::Integer + name: 'storageGb' + description: | + Optional. Storage (GB) request and limit for a single Airflow worker replica. 
+ - !ruby/object:Api::Type::Integer + name: 'minCount' + description: | + Optional. Minimum number of workers for autoscaling. + - !ruby/object:Api::Type::Integer + name: 'maxCount' + description: | + Optional. Maximum number of workers for autoscaling. + - !ruby/object:Api::Type::NestedObject + name: 'triggerer' + description: | + Configuration for resources used by Airflow triggerers. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Optional. The number of triggerers. + - !ruby/object:Api::Type::Integer + name: 'cpu' + description: | + Optional. CPU request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Integer + name: 'memoryGb' + description: | + Optional. Memory (GB) request and limit for a single Airflow triggerer replica. + - !ruby/object:Api::Type::Enum + name: 'environmentSize' + description: | + Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + values: + - :ENVIRONMENT_SIZE_UNSPECIFIED + - :ENVIRONMENT_SIZE_SMALL + - :ENVIRONMENT_SIZE_MEDIUM + - :ENVIRONMENT_SIZE_LARGE + - !ruby/object:Api::Type::String + name: 'airflowUri' + description: | + Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). + - !ruby/object:Api::Type::String + name: 'airflowByoidUri' + description: | + Output only. The 'bring your own identity' variant of the URI of the Apache Airflow Web UI hosted within this environment, to be accessed with external identities using workforce identity federation (see [Access environments with workforce identity federation](/composer/docs/composer-2/access-environments-with-workforce-identity-federation)). 
+ - !ruby/object:Api::Type::NestedObject + name: 'masterAuthorizedNetworksConfig' + description: | + Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Whether or not master authorized networks feature is enabled. + - !ruby/object:Api::Type::Array + name: 'cidrBlocks' + description: | + Up to 50 external networks that could access Kubernetes master through HTTPS. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User-defined name that identifies the CIDR block. + - !ruby/object:Api::Type::String + name: 'cidrBlock' + description: | + CIDR block that must be specified in CIDR notation. + - !ruby/object:Api::Type::NestedObject + name: 'recoveryConfig' + description: | + The Recovery settings of an environment. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'scheduledSnapshotsConfig' + description: | + The configuration for scheduled snapshot creation mechanism. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Whether scheduled snapshots creation is enabled. + - !ruby/object:Api::Type::String + name: 'snapshotLocation' + description: | + Optional. The Cloud Storage location for storing automatically created snapshots. + - !ruby/object:Api::Type::String + name: 'snapshotCreationSchedule' + description: | + Optional. The cron expression representing the time when snapshots creation mechanism runs. This field is subject to additional validation around frequency of execution. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. 
Time zone that sets the context to interpret snapshot_creation_schedule. + - !ruby/object:Api::Type::Enum + name: 'resilienceMode' + description: | + Optional. Resilience mode of the Cloud Composer Environment. This field is supported for Cloud Composer environments in versions composer-2.2.0-airflow-*.*.* and newer. + values: + - :RESILIENCE_MODE_UNSPECIFIED + - :HIGH_RESILIENCE + - !ruby/object:Api::Type::String + name: 'uuid' + description: | + Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The current state of the environment. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :RUNNING + - :UPDATING + - :DELETING + - :ERROR + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time at which this environment was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time at which this environment was last modified. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. + - !ruby/object:Api::Type::NestedObject + name: 'storageConfig' + description: | + The configuration for data storage in the environment. + properties: + - !ruby/object:Api::Type::String + name: 'bucket' + description: | + Optional. 
The name of the Cloud Storage bucket used by the environment. No `gs://` prefix. + diff --git a/mmv1/products/composer/inspec.yaml b/mmv1/products/composer/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/composer/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/compute/api.yaml b/mmv1/products/compute/api.yaml index 793f41d7c..36079ad40 100644 --- a/mmv1/products/compute/api.yaml +++ b/mmv1/products/compute/api.yaml @@ -14355,146 +14355,6 @@ objects: input: true description: | The size of the disk in base-2 GB. - - !ruby/object:Api::Resource - name: 'ServiceAttachment' - kind: 'compute#ServiceAttachment' - base_url: projects/{{project}}/regions/{{region}}/serviceAttachments - has_self_link: true - update_verb: :PATCH - description: | - Represents a ServiceAttachment resource. 
- references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Configuring Private Service Connect to access services': 'https://cloud.google.com/vpc/docs/configure-private-service-connect-services' - api: 'https://cloud.google.com/compute/docs/reference/beta/serviceAttachments' - async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - kind: 'compute#operation' - path: 'name' - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'targetLink' - status: !ruby/object:Api::OpAsync::Status - path: 'status' - complete: 'DONE' - allowed: - - 'PENDING' - - 'RUNNING' - - 'DONE' - error: !ruby/object:Api::OpAsync::Error - path: 'error/errors' - message: 'message' - parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'region' - resource: 'Region' - imports: 'name' - description: | - URL of the region where the resource resides. - required: true - input: true - properties: - - !ruby/object:Api::Type::String - name: 'name' - required: true - description: | - Name of the resource. The name must be 1-63 characters long, and - comply with RFC1035. Specifically, the name must be 1-63 characters - long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` - which means the first character must be a lowercase letter, and all - following characters must be a dash, lowercase letter, or digit, - except the last character, which cannot be a dash. - input: true - - !ruby/object:Api::Type::String - name: 'description' - description: | - An optional description of this resource. - - !ruby/object:Api::Type::Fingerprint - name: 'fingerprint' - description: | - Fingerprint of this resource. This field is used internally during - updates of this resource. - - !ruby/object:Api::Type::String - name: 'connectionPreference' - required: true - description: | - The connection preference to use for this service attachment. 
Valid - values include "ACCEPT_AUTOMATIC", "ACCEPT_MANUAL". - - !ruby/object:Api::Type::Array - name: 'connectedEndpoints' - output: true - description: | - An array of the consumer forwarding rules connected to this service - attachment. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'endpoint' - output: true - description: | - The URL of the consumer forwarding rule. - - !ruby/object:Api::Type::String - name: 'status' - output: true - description: | - The status of the connection from the consumer forwarding rule to - this service attachment. - - !ruby/object:Api::Type::ResourceRef - name: targetService - required: true - input: true - resource: 'ForwardingRule' - imports: 'selfLink' - description: | - The URL of a forwarding rule that represents the service identified by - this service attachment. - - !ruby/object:Api::Type::Array - name: 'natSubnets' - required: true - send_empty_value: true - description: | - An array of subnets that is provided for NAT in this service attachment. - item_type: !ruby/object:Api::Type::ResourceRef - name: 'subnet' - resource: 'Subnetwork' - imports: 'selfLink' - description: | - A subnet that is provided for NAT in this service attachment. - - !ruby/object:Api::Type::Boolean - name: 'enableProxyProtocol' - required: true - input: true - description: | - If true, enable the proxy protocol which is for supplying client TCP/IP - address data in TCP connections that traverse proxies on their way to - destination servers. - - !ruby/object:Api::Type::Array - name: 'consumerRejectLists' - item_type: Api::Type::String - send_empty_value: true - description: | - An array of projects that are not allowed to connect to this service - attachment. - - !ruby/object:Api::Type::Array - name: 'consumerAcceptLists' - description: | - An array of projects that are allowed to connect to this service - attachment. 
- send_empty_value: true - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'projectIdOrNum' - required: true - description: | - A project that is allowed to connect to this service attachment. - - !ruby/object:Api::Type::Integer - name: 'connectionLimit' - required: true - description: | - The number of consumer forwarding rules the consumer project can - create. - !ruby/object:Api::Resource name: 'SslPolicy' kind: 'compute#sslPolicy' @@ -15083,205 +14943,6 @@ objects: description: | This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. - - !ruby/object:Api::Resource - name: 'RegionTargetHttpProxy' - kind: 'compute#targetHttpProxy' - base_url: projects/{{project}}/regions/{{region}}/targetHttpProxies - collection_url_key: 'items' - has_self_link: true - input: true - description: | - Represents a RegionTargetHttpProxy resource, which is used by one or more - forwarding rules to route incoming HTTP requests to a URL map. 
- references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Official Documentation': - 'https://cloud.google.com/compute/docs/load-balancing/http/target-proxies' - api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionTargetHttpProxies' - async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - kind: 'compute#operation' - path: 'name' - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'targetLink' - status: !ruby/object:Api::OpAsync::Status - path: 'status' - complete: 'DONE' - allowed: - - 'PENDING' - - 'RUNNING' - - 'DONE' - error: !ruby/object:Api::OpAsync::Error - path: 'error/errors' - message: 'message' - parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'region' - resource: 'Region' - imports: 'name' - required: true - input: true - description: | - The region where the regional proxy resides. - properties: - - !ruby/object:Api::Type::Time - name: 'creationTimestamp' - description: 'Creation timestamp in RFC3339 text format.' - output: true - - !ruby/object:Api::Type::String - name: 'description' - description: 'An optional description of this resource.' - - !ruby/object:Api::Type::Integer - name: 'id' - description: 'The unique identifier for the resource.' - output: true - - !ruby/object:Api::Type::String - name: 'name' - description: | - Name of the resource. Provided by the client when the resource is - created. The name must be 1-63 characters long, and comply with - RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the - first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. 
- required: true - - !ruby/object:Api::Type::ResourceRef - name: 'urlMap' - resource: 'RegionUrlMap' - imports: 'selfLink' - description: | - A reference to the RegionUrlMap resource that defines the mapping from URL - to the BackendService. - required: true - update_verb: :POST - update_url: 'projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}/setUrlMap' - - !ruby/object:Api::Resource - name: 'RegionTargetHttpsProxy' - kind: 'compute#targetHttpsProxy' - base_url: projects/{{project}}/regions/{{region}}/targetHttpsProxies - collection_url_key: 'items' - has_self_link: true - input: true - description: | - Represents a RegionTargetHttpsProxy resource, which is used by one or more - forwarding rules to route incoming HTTPS requests to a URL map. - references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/http/target-proxies' - api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionTargetHttpsProxies' - async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - kind: 'compute#operation' - path: 'name' - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'targetLink' - status: !ruby/object:Api::OpAsync::Status - path: 'status' - complete: 'DONE' - allowed: - - 'PENDING' - - 'RUNNING' - - 'DONE' - error: !ruby/object:Api::OpAsync::Error - path: 'error/errors' - message: 'message' - parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'region' - resource: 'Region' - imports: 'name' - input: true - required: true - description: | - The region where the regional proxy resides. - properties: - - !ruby/object:Api::Type::Time - name: 'creationTimestamp' - description: 'Creation timestamp in RFC3339 text format.' - output: true - - !ruby/object:Api::Type::String - name: 'description' - description: 'An optional description of this resource.' 
- input: true - - !ruby/object:Api::Type::Integer - name: 'id' - description: 'The unique identifier for the resource.' - output: true - - !ruby/object:Api::Type::String - name: 'name' - description: | - Name of the resource. Provided by the client when the resource is - created. The name must be 1-63 characters long, and comply with - RFC1035. Specifically, the name must be 1-63 characters long and match - the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the - first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. - input: true - required: true - # This field is present in the schema but as of 2019 Sep 23 attempting to set it fails with - # a 400 "QUIC override is supported only with global TargetHttpsProxy". jamessynge@ said in an - # email sent on 2019 Sep 20 that support for this "is probably far in the future." - #- !ruby/object:Api::Type::Enum - # name: 'quicOverride' - # description: | - # Specifies the QUIC override policy for this resource. This determines - # whether the load balancer will attempt to negotiate QUIC with clients - # or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is - # specified, uses the QUIC policy with no user overrides, which is - # equivalent to DISABLE. Not specifying this field is equivalent to - # specifying NONE. - # values: - # - :NONE - # - :ENABLE - # - :DISABLE - # update_verb: :POST - # update_url: - # 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setQuicOverride' - - !ruby/object:Api::Type::Array - name: 'sslCertificates' - description: | - A list of RegionSslCertificate resources that are used to authenticate - connections between users and the load balancer. Currently, exactly - one SSL certificate must be specified. 
- required: true - update_verb: :POST - update_url: - 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslCertificates' - item_type: !ruby/object:Api::Type::ResourceRef - name: 'sslCertificate' - resource: 'RegionSslCertificate' - imports: 'selfLink' - description: 'The SSL certificates used by this TargetHttpsProxy' - # This field is present in the schema but as of 2019 Sep 23 attempting to set it fails with - # a 400 "SSL policy is supported only with global TargetHttpsProxy". jamessynge@ said in an - # email sent on 2019 Sep 20 that will support this "in the next 6 months I hope." - #- !ruby/object:Api::Type::ResourceRef - # name: 'sslPolicy' - # resource: 'SslPolicy' - # imports: 'selfLink' - # description: | - # A reference to the SslPolicy resource that will be associated with - # the TargetHttpsProxy resource. If not set, the TargetHttpsProxy - # resource will not have any SSL policy configured. - # update_verb: :POST - # update_url: - # 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslPolicy' - - !ruby/object:Api::Type::ResourceRef - name: 'urlMap' - resource: 'RegionUrlMap' - imports: 'selfLink' - description: | - A reference to the RegionUrlMap resource that defines the mapping from URL - to the RegionBackendService. - required: true - update_verb: :POST - update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setUrlMap' - !ruby/object:Api::Resource name: 'TargetInstance' kind: 'compute#targetInstance' @@ -15789,117 +15450,16 @@ objects: output: true # status is not useful for state convergence - !ruby/object:Api::Resource - name: 'VpnGateway' - kind: 'compute#vpnGateway' - base_url: projects/{{project}}/regions/{{region}}/vpnGateways + name: 'ExternalVpnGateway' + kind: 'compute#externalVpnGateway' + base_url: projects/{{project}}/global/externalVpnGateways collection_url_key: 'items' input: true has_self_link: true description: | - Represents a VPN gateway running in GCP. 
This virtual device is managed - by Google, but used only by you. This type of VPN Gateway allows for the creation - of VPN solutions with higher availability than classic Target VPN Gateways. + Represents a VPN gateway managed outside of GCP. references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Choosing a VPN': https://cloud.google.com/vpn/docs/how-to/choosing-a-vpn - 'Cloud VPN Overview': 'https://cloud.google.com/vpn/docs/concepts/overview' - api: https://cloud.google.com/compute/docs/reference/rest/v1/vpnGateways - async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - kind: 'compute#operation' - path: 'name' - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'targetLink' - status: !ruby/object:Api::OpAsync::Status - path: 'status' - complete: 'DONE' - allowed: - - 'PENDING' - - 'RUNNING' - - 'DONE' - error: !ruby/object:Api::OpAsync::Error - path: 'error/errors' - message: 'message' - parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'region' - resource: 'Region' - imports: 'name' - description: | - The region this gateway should sit in. - required: true - properties: - - !ruby/object:Api::Type::Time - name: 'creationTimestamp' - description: 'Creation timestamp in RFC3339 text format.' - output: true - - !ruby/object:Api::Type::String - name: 'description' - description: 'An optional description of this resource.' - input: true - - !ruby/object:Api::Type::String - name: 'name' - description: | - Name of the resource. Provided by the client when the resource is - created. The name must be 1-63 characters long, and comply with - RFC1035. 
Specifically, the name must be 1-63 characters long and - match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - the first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. - required: true - input: true - - !ruby/object:Api::Type::Integer - name: 'id' - description: 'The unique identifier for the resource.' - output: true - - !ruby/object:Api::Type::ResourceRef - name: 'network' - resource: 'Network' - imports: 'selfLink' - description: | - The network this VPN gateway is accepting traffic for. - required: true - input: true - - !ruby/object:Api::Type::Array - name: 'vpnInterfaces' - description: | - A list of interfaces on this VPN gateway. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::Integer - name: 'id' - description: 'The numeric ID of this VPN gateway interface.' - - !ruby/object:Api::Type::String - name: 'ipAddress' - description: 'The external IP address for this VPN gateway interface.' - output: true - - !ruby/object:Api::Type::ResourceRef - name: 'interconnectAttachment' - resource: 'InterconnectAttachment' - imports: 'selfLink' - description: | - URL of the interconnect attachment resource. When the value - of this field is present, the VPN Gateway will be used for - IPsec-encrypted Cloud Interconnect; all Egress or Ingress - traffic for this VPN Gateway interface will go through the - specified interconnect attachment resource. - - Not currently available publicly. - input: true - - !ruby/object:Api::Resource - name: 'ExternalVpnGateway' - kind: 'compute#externalVpnGateway' - base_url: projects/{{project}}/global/externalVpnGateways - collection_url_key: 'items' - input: true - has_self_link: true - description: | - Represents a VPN gateway managed outside of GCP. 
- references: !ruby/object:Api::Resource::ReferenceLinks - api: https://cloud.google.com/compute/docs/reference/rest/v1/externalVpnGateways + api: https://cloud.google.com/compute/docs/reference/rest/v1/externalVpnGateways async: !ruby/object:Api::OpAsync operation: !ruby/object:Api::OpAsync::Operation kind: 'compute#operation' @@ -19820,4 +19380,11694 @@ objects: - :INITIALIZING - :READY_TO_ANNOUNCE - :ANNOUNCED - - :DELETING \ No newline at end of file + - :DELETING + + + + - !ruby/object:Api::Resource + name: ServiceAttachment + base_url: 'projects/{{project}}/regions/{{region}}/serviceAttachments' + self_link: 'projects/{{project}}/regions/{{region}}/serviceAttachments/{{serviceAttachment}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#serviceAttachment for service attachments. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource type. The server generates this identifier. 
+ - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::String + name: 'producerForwardingRule' + description: | + The URL of a forwarding rule with loadBalancingScheme INTERNAL* that is serving the endpoint identified by this service attachment. + - !ruby/object:Api::Type::String + name: 'targetService' + description: | + The URL of a service serving the endpoint identified by this service attachment. + - !ruby/object:Api::Type::Enum + name: 'connectionPreference' + description: | + The connection preference of service attachment. The value can be set to ACCEPT_AUTOMATIC. An ACCEPT_AUTOMATIC service attachment is one that always accepts the connection from consumer forwarding rules. 
+ values: + - :ACCEPT_AUTOMATIC + - :ACCEPT_MANUAL + - :CONNECTION_PREFERENCE_UNSPECIFIED + - !ruby/object:Api::Type::Array + name: 'connectedEndpoints' + description: | + [Output Only] An array of connections for all the consumers connected to this service attachment. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'natSubnets' + description: | + An array of URLs where each entry is the URL of a subnet provided by the service producer to use for NAT in this service attachment. + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'enableProxyProtocol' + description: | + If true, enable the proxy protocol which is for supplying client TCP/IP address data in TCP connections that traverse proxies on their way to destination servers. + - !ruby/object:Api::Type::Array + name: 'consumerRejectLists' + description: | + Projects that are not allowed to connect to this service attachment. The project can be specified using its id or number. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'consumerAcceptLists' + description: | + Projects that are allowed to connect to this service attachment. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'pscServiceAttachmentId' + description: | + [Output Only] An 128-bit global unique ID of the PSC service attachment. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a ServiceAttachment. An up-to-date fingerprint must be provided in order to patch/update the ServiceAttachment; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the ServiceAttachment. 
+ - !ruby/object:Api::Type::Array + name: 'domainNames' + description: | + If specified, the domain name will be used during the integration between the PSC connected endpoints and the Cloud DNS. For example, this is a valid domain name: "p.mycompany.com.". Current max number of domain names supported is 1. + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'reconcileConnections' + description: | + This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to false. + + + + + - !ruby/object:Api::Resource + name: VpnGateway + base_url: 'projects/{{project}}/regions/{{region}}/vpnGateways' + self_link: 'projects/{{project}}/regions/{{region}}/vpnGateways/{{vpnGateway}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a HA VPN gateway. 
HA VPN is a high-availability (HA) Cloud VPN solution that lets you securely connect your on-premises network to your Google Cloud Virtual Private Cloud network through an IPsec VPN connection in a single region. For more information about Cloud HA VPN solutions, see Cloud VPN topologies . + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of resource. Always compute#vpnGateway for VPN gateways. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the VPN gateway resides. + - !ruby/object:Api::Type::String + name: 'network' + description: | + URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels for this resource. 
These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + A fingerprint for the labels being applied to this VpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a VpnGateway. + - !ruby/object:Api::Type::Array + name: 'vpnInterfaces' + description: | + The list of VPN interfaces associated with this VPN gateway. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'id' + description: | + [Output Only] Numeric identifier for this VPN interface associated with the VPN gateway. + - !ruby/object:Api::Type::String + name: 'ipAddress' + description: | + [Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For HA VPN over Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address. 
+ - !ruby/object:Api::Type::String + name: 'interconnectAttachment' + description: | + URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for HA VPN over Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. + - !ruby/object:Api::Type::Enum + name: 'stackType' + description: | + The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not specified, IPV4_ONLY will be used. + values: + - :IPV4_IPV6 + - :IPV4_ONLY + + + + + - !ruby/object:Api::Resource + name: MachineType + base_url: 'projects/{{project}}/zones/{{zone}}/machineTypes' + self_link: 'projects/{{project}}/zones/{{zone}}/machineTypes/{{machineType}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Machine Type resource. You can use specific machine types for your VM instances based on performance and pricing requirements. For more information, read Machine Types. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] The type of the resource. Always compute#machineType for machine types. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. 
+ - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] Name of the resource. + - !ruby/object:Api::Type::String + name: 'description' + description: | + [Output Only] An optional textual description of the resource. + - !ruby/object:Api::Type::Integer + name: 'guestCpus' + description: | + [Output Only] The number of virtual CPUs that are available to the instance. + - !ruby/object:Api::Type::Integer + name: 'memoryMb' + description: | + [Output Only] The amount of physical memory available to the instance, defined in MB. + - !ruby/object:Api::Type::Integer + name: 'imageSpaceGb' + description: | + [Deprecated] This property is deprecated and will never be populated with any relevant values. + - !ruby/object:Api::Type::Array + name: 'scratchDisks' + description: | + [Output Only] A list of extended scratch disks assigned to the instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'diskGb' + description: | + Size of the scratch disk, defined in GB. + - !ruby/object:Api::Type::Integer + name: 'maximumPersistentDisks' + description: | + [Output Only] Maximum persistent disks allowed. + - !ruby/object:Api::Type::String + name: 'maximumPersistentDisksSizeGb' + description: | + [Output Only] Maximum total persistent disks size (GB) allowed. + - !ruby/object:Api::Type::NestedObject + name: 'deprecated' + description: | + Deprecation status for a public resource. + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. 
Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. + values: + - :ACTIVE + - :DELETED + - :DEPRECATED + - :OBSOLETE + - !ruby/object:Api::Type::String + name: 'replacement' + description: | + The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. + - !ruby/object:Api::Type::String + name: 'deprecated' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'obsolete' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'deleted' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The name of the zone where the machine type resides, such as us-central1-a. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::Boolean + name: 'isSharedCpu' + description: | + [Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information. 
+ - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + [Output Only] A list of accelerator configurations assigned to this machine type. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'guestAcceleratorType' + description: | + The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'guestAcceleratorCount' + description: | + Number of accelerator cards exposed to the guest. + + + + + - !ruby/object:Api::Resource + name: MachineType + base_url: 'projects/{{project}}/zones/{{zone}}/machineTypes' + self_link: 'projects/{{project}}/zones/{{zone}}/machineTypes/{{machineType}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Machine Type resource. You can use specific machine types for your VM instances based on performance and pricing requirements. For more information, read Machine Types. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] The type of the resource. Always compute#machineType for machine types. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. 
+ - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] Name of the resource. + - !ruby/object:Api::Type::String + name: 'description' + description: | + [Output Only] An optional textual description of the resource. + - !ruby/object:Api::Type::Integer + name: 'guestCpus' + description: | + [Output Only] The number of virtual CPUs that are available to the instance. + - !ruby/object:Api::Type::Integer + name: 'memoryMb' + description: | + [Output Only] The amount of physical memory available to the instance, defined in MB. + - !ruby/object:Api::Type::Integer + name: 'imageSpaceGb' + description: | + [Deprecated] This property is deprecated and will never be populated with any relevant values. + - !ruby/object:Api::Type::Array + name: 'scratchDisks' + description: | + [Output Only] A list of extended scratch disks assigned to the instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'diskGb' + description: | + Size of the scratch disk, defined in GB. + - !ruby/object:Api::Type::Integer + name: 'maximumPersistentDisks' + description: | + [Output Only] Maximum persistent disks allowed. + - !ruby/object:Api::Type::String + name: 'maximumPersistentDisksSizeGb' + description: | + [Output Only] Maximum total persistent disks size (GB) allowed. + - !ruby/object:Api::Type::NestedObject + name: 'deprecated' + description: | + Deprecation status for a public resource. + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. 
+ values: + - :ACTIVE + - :DELETED + - :DEPRECATED + - :OBSOLETE + - !ruby/object:Api::Type::String + name: 'replacement' + description: | + The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. + - !ruby/object:Api::Type::String + name: 'deprecated' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'obsolete' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'deleted' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The name of the zone where the machine type resides, such as us-central1-a. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::Boolean + name: 'isSharedCpu' + description: | + [Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + [Output Only] A list of accelerator configurations assigned to this machine type. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'guestAcceleratorType' + description: | + The accelerator type resource name, not a full URL, e.g. 
nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'guestAcceleratorCount' + description: | + Number of accelerator cards exposed to the guest. + + + + + - !ruby/object:Api::Resource + name: GlobalNetworkEndpointGroup + base_url: 'projects/{{project}}/global/networkEndpointGroups' + self_link: 'projects/{{project}}/global/networkEndpointGroups/{{networkEndpointGroup}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a collection of network endpoints. A network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs for different use cases, see Network endpoint groups overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. 
+ - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Enum + name: 'networkEndpointType' + description: | + Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT. + values: + - :GCE_VM_IP + - :GCE_VM_IP_PORT + - :INTERNET_FQDN_PORT + - :INTERNET_IP_PORT + - :NON_GCP_PRIVATE_IP_PORT + - :PRIVATE_SERVICE_CONNECT + - :SERVERLESS + - !ruby/object:Api::Type::Integer + name: 'size' + description: | + [Output only] Number of network endpoints in the network endpoint group. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] The URL of the region where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The URL of the zone where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'network' + description: | + The URL of the network to which all network endpoints in the NEG belong. Uses "default" project network if unspecified. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional URL of the subnetwork to which all network endpoints in the NEG belong. 
+ - !ruby/object:Api::Type::Integer + name: 'defaultPort' + description: | + The default port used if the port number is not specified in the network endpoint. + - !ruby/object:Api::Type::NestedObject + name: 'annotations' + description: | + Metadata defined as annotations on the network endpoint group. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'cloudRun' + description: | + Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". + - !ruby/object:Api::Type::String + name: 'tag' + description: | + Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse and fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'appEngine' + description: | + Configuration for an App Engine network endpoint group (NEG). 
The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". + - !ruby/object:Api::Type::String + name: 'version' + description: | + Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'cloudFunction' + description: | + Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'function' + description: | + A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse function field from a request URL. 
URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. + - !ruby/object:Api::Type::String + name: 'pscTargetService' + description: | + The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. An example value is: "asia-northeast3-cloudkms.googleapis.com" + - !ruby/object:Api::Type::NestedObject + name: 'pscData' + description: | + All data that is specifically relevant to only network endpoint groups of type PRIVATE_SERVICE_CONNECT. + properties: + - !ruby/object:Api::Type::String + name: 'consumerPscAddress' + description: | + [Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB. + - !ruby/object:Api::Type::String + name: 'pscConnectionId' + description: | + [Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer. + - !ruby/object:Api::Type::Enum + name: 'pscConnectionStatus' + description: | + [Output Only] The connection status of the PSC Forwarding Rule. 
+ values: + - :ACCEPTED + - :CLOSED + - :NEEDS_ATTENTION + - :PENDING + - :REJECTED + - :STATUS_UNSPECIFIED + + + + + - !ruby/object:Api::Resource + name: MachineImage + base_url: 'projects/{{project}}/global/machineImages' + self_link: 'projects/{{project}}/global/machineImages/{{machineImage}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a machine image resource. A machine image is a Compute Engine resource that stores all the configuration, metadata, permissions, and data from one or more disks required to create a Virtual machine (VM) instance. For more information, see Machine images. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] The resource type, which is always compute#machineImage for machine image. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] A unique identifier for this machine image. The server defines this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] The creation timestamp for this machine image in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] The URL for this machine image. The server defines this URL. + - !ruby/object:Api::Type::String + name: 'sourceInstance' + description: | + The source instance used to create the machine image. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of the machine image. One of the following values: INVALID, CREATING, READY, DELETING, and UPLOADING. + values: + - :CREATING + - :DELETING + - :INVALID + - :READY + - :UPLOADING + - !ruby/object:Api::Type::NestedObject + name: 'sourceInstanceProperties' + description: | + DEPRECATED: Please use compute#instanceProperties instead. New properties will not be added to this field. + properties: + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional text description for the instances that are created from this machine image. + - !ruby/object:Api::Type::NestedObject + name: 'tags' + description: | + A set of instance tags. + properties: + - !ruby/object:Api::Type::Array + name: 'items' + description: | + An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the tags' contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update tags. You must always provide an up-to-date fingerprint hash in order to update or change tags. To see the latest fingerprint, make get() request to the instance. + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + The machine type to use for instances that are created from this machine image. + - !ruby/object:Api::Type::Boolean + name: 'canIpForward' + description: | + Enables instances created based on this machine image to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. + - !ruby/object:Api::Type::Array + name: 'networkInterfaces' + description: | + An array of network access configurations for this interface. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkInterface for network interfaces. + - !ruby/object:Api::Type::String + name: 'network' + description: | + URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. 
If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork + - !ruby/object:Api::Type::String + name: 'networkIP' + description: | + An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. + - !ruby/object:Api::Type::String + name: 'ipv6Address' + description: | + An IPv6 internal network address for this network interface. To use a static internal IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'internalIpv6PrefixLength' + description: | + The prefix length of the primary internal IPv6 range. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] The name of the network interface, which is generated by the server. For a VM, the network interface uses the nicN naming format. Where N is a value between 0 and 7. The default interface value is nic0. 
+ - !ruby/object:Api::Type::Array + name: 'accessConfigs' + description: | + An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. 
If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. + - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'ipv6AccessConfigs' + description: | + An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. 
If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. 
+ - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'aliasIpRanges' + description: | + An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipCidrRange' + description: | + The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. 
This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). + - !ruby/object:Api::Type::String + name: 'subnetworkRangeName' + description: | + The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + - !ruby/object:Api::Type::Enum + name: 'stackType' + description: | + The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. + values: + - :IPV4_IPV6 + - :IPV4_ONLY + - !ruby/object:Api::Type::Enum + name: 'ipv6AccessType' + description: | + [Output Only] One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. Valid only if stackType is IPV4_IPV6. + values: + - :EXTERNAL + - :INTERNAL + - !ruby/object:Api::Type::Integer + name: 'queueCount' + description: | + The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. + - !ruby/object:Api::Type::Enum + name: 'nicType' + description: | + The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. 
+ values: + - :GVNIC + - :UNSPECIFIED_NIC_TYPE + - :VIRTIO_NET + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. + - !ruby/object:Api::Type::Array + name: 'disks' + description: | + An array of disks that are associated with the instances that are created from this machine image. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#attachedDisk for attached disks. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Specifies the type of the attached disk, either SCRATCH or PERSISTENT. + values: + - :PERSISTENT + - :SCRATCH + - !ruby/object:Api::Type::Enum + name: 'mode' + description: | + The mode in which this disk is attached to the source instance, either READ_WRITE or READ_ONLY. + values: + - :READ_ONLY + - :READ_WRITE + - !ruby/object:Api::Type::String + name: 'source' + description: | + Specifies a URL of the disk attached to the source instance. + - !ruby/object:Api::Type::String + name: 'deviceName' + description: | + Specifies the name of the disk attached to the source instance. + - !ruby/object:Api::Type::Integer + name: 'index' + description: | + Specifies zero-based index of the disk that is attached to the source instance. + - !ruby/object:Api::Type::Boolean + name: 'boot' + description: | + Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. + - !ruby/object:Api::Type::Boolean + name: 'autoDelete' + description: | + Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). 
+ - !ruby/object:Api::Type::Array + name: 'licenses' + description: | + [Output Only] Any valid publicly visible licenses. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'interface' + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. + values: + - :NVME + - :SCSI + - !ruby/object:Api::Type::Array + name: 'guestOsFeatures' + description: | + A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features. + values: + - :FEATURE_TYPE_UNSPECIFIED + - :GVNIC + - :MULTI_IP_SUBNET + - :SECURE_BOOT + - :SEV_CAPABLE + - :SEV_LIVE_MIGRATABLE + - :SEV_LIVE_MIGRATABLE_V2 + - :SEV_SNP_CAPABLE + - :UEFI_COMPATIBLE + - :VIRTIO_SCSI_MULTIQUEUE + - :WINDOWS + - !ruby/object:Api::Type::NestedObject + name: 'diskEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. 
You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + The size of the disk in base-2 GB. + - !ruby/object:Api::Type::String + name: 'storageBytes' + description: | + [Output Only] A size of the storage used by the disk's snapshot by this machine image. 
+ - !ruby/object:Api::Type::Enum + name: 'storageBytesStatus' + description: | + [Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date. + values: + - :UPDATING + - :UP_TO_DATE + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + [Output Only] URL of the disk type resource. For example: projects/project /zones/zone/diskTypes/pd-standard or pd-ssd + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + A metadata key/value entry. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#metadata for metadata. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the resource. + - !ruby/object:Api::Type::Array + name: 'items' + description: | + Array of key/value pairs. The total size of all keys and values must be less than 512 KB. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. 
Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB). + - !ruby/object:Api::Type::Array + name: 'serviceAccounts' + description: | + A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this machine image. Use metadata queries to obtain the access tokens for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'email' + description: | + Email address of the service account. + - !ruby/object:Api::Type::Array + name: 'scopes' + description: | + The list of scopes to be made available for this service account. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Sets the scheduling options for an Instance. + properties: + - !ruby/object:Api::Type::Enum + name: 'onHostMaintenance' + description: | + Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Set VM host maintenance policy. + values: + - :MIGRATE + - :TERMINATE + - !ruby/object:Api::Type::Boolean + name: 'automaticRestart' + description: | + Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. 
By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. + - !ruby/object:Api::Type::Array + name: 'nodeAffinities' + description: | + A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Corresponds to the label key of Node resource. + - !ruby/object:Api::Type::Enum + name: 'operator' + description: | + Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. + values: + - :IN + - :NOT_IN + - :OPERATOR_UNSPECIFIED + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Corresponds to the label values of Node resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'minNodeCpus' + description: | + The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. + - !ruby/object:Api::Type::String + name: 'locationHint' + description: | + An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. + - !ruby/object:Api::Type::Enum + name: 'provisioningModel' + description: | + Specifies the provisioning model of the instance. + values: + - :SPOT + - :STANDARD + - !ruby/object:Api::Type::Enum + name: 'instanceTerminationAction' + description: | + Specifies the termination action for the instance. 
+ values: + - :DELETE + - :INSTANCE_TERMINATION_ACTION_UNSPECIFIED + - :STOP + - !ruby/object:Api::Type::NestedObject + name: 'localSsdRecoveryTimeout' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to instances that are created from this machine image. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'guestAccelerators' + description: | + A list of guest accelerator cards' type and count to use for instances created from this machine image. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. 
+ - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the guest accelerator cards exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Minimum cpu/platform to be used by instances created from this machine image. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. + - !ruby/object:Api::Type::Boolean + name: 'deletionProtection' + description: | + Whether the instance created from this machine image should be protected against deletion. + - !ruby/object:Api::Type::Enum + name: 'keyRevocationActionType' + description: | + KeyRevocationActionType of the instance. Supported options are "STOP" and "NONE". The default value is "NONE" if it is not specified. + values: + - :KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED + - :NONE + - :STOP + - !ruby/object:Api::Type::NestedObject + name: 'instanceProperties' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional text description for the instances that are created from these properties. + - !ruby/object:Api::Type::NestedObject + name: 'tags' + description: | + A set of instance tags. + properties: + - !ruby/object:Api::Type::Array + name: 'items' + description: | + An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the tags' contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update tags. 
You must always provide an up-to-date fingerprint hash in order to update or change tags. To see the latest fingerprint, make get() request to the instance. + - !ruby/object:Api::Type::NestedObject + name: 'resourceManagerTags' + description: | + Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + The machine type to use for instances that are created from these properties. + - !ruby/object:Api::Type::Boolean + name: 'canIpForward' + description: | + Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. + - !ruby/object:Api::Type::Array + name: 'networkInterfaces' + description: | + An array of network access configurations for this interface. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkInterface for network interfaces. + - !ruby/object:Api::Type::String + name: 'network' + description: | + URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. 
If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork + - !ruby/object:Api::Type::String + name: 'networkIP' + description: | + An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. + - !ruby/object:Api::Type::String + name: 'ipv6Address' + description: | + An IPv6 internal network address for this network interface. To use a static internal IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'internalIpv6PrefixLength' + description: | + The prefix length of the primary internal IPv6 range. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] The name of the network interface, which is generated by the server. For a VM, the network interface uses the nicN naming format. Where N is a value between 0 and 7. 
The default interface value is nic0. + - !ruby/object:Api::Type::Array + name: 'accessConfigs' + description: | + An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. 
If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. + - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'ipv6AccessConfigs' + description: | + An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. 
If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. 
+ - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'aliasIpRanges' + description: | + An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipCidrRange' + description: | + The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. 
This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). + - !ruby/object:Api::Type::String + name: 'subnetworkRangeName' + description: | + The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + - !ruby/object:Api::Type::Enum + name: 'stackType' + description: | + The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. + values: + - :IPV4_IPV6 + - :IPV4_ONLY + - !ruby/object:Api::Type::Enum + name: 'ipv6AccessType' + description: | + [Output Only] One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. Valid only if stackType is IPV4_IPV6. + values: + - :EXTERNAL + - :INTERNAL + - !ruby/object:Api::Type::Integer + name: 'queueCount' + description: | + The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. + - !ruby/object:Api::Type::Enum + name: 'nicType' + description: | + The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. 
+ values: + - :GVNIC + - :UNSPECIFIED_NIC_TYPE + - :VIRTIO_NET + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. + - !ruby/object:Api::Type::Array + name: 'disks' + description: | + An array of disks that are associated with the instances that are created from these properties. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#attachedDisk for attached disks. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. + values: + - :PERSISTENT + - :SCRATCH + - !ruby/object:Api::Type::Enum + name: 'mode' + description: | + The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. + values: + - :READ_ONLY + - :READ_WRITE + - !ruby/object:Api::Type::Enum + name: 'savedState' + description: | + For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api. + values: + - :DISK_SAVED_STATE_UNSPECIFIED + - :PRESERVED + - !ruby/object:Api::Type::String + name: 'source' + description: | + Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. 
This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. + - !ruby/object:Api::Type::String + name: 'deviceName' + description: | + Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. + - !ruby/object:Api::Type::Integer + name: 'index' + description: | + [Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number. + - !ruby/object:Api::Type::Boolean + name: 'boot' + description: | + Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. + - !ruby/object:Api::Type::NestedObject + name: 'initializeParams' + description: | + [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This field is persisted and returned for instanceTemplate and not returned in the context of instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. + properties: + - !ruby/object:Api::Type::String + name: 'diskName' + description: | + Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. 
+ - !ruby/object:Api::Type::String + name: 'sourceImage' + description: | + The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. 
For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard. + - !ruby/object:Api::Type::NestedObject + name: 'sourceImageEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. 
For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualified key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'sourceSnapshot' + description: | + The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. + - !ruby/object:Api::Type::NestedObject + name: 'sourceSnapshotEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. 
For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualified key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. 
For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description. Provide this property when creating the disk. + - !ruby/object:Api::Type::Array + name: 'replicaZones' + description: | + Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'resourcePolicies' + description: | + Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'onUpdateAction' + description: | + Specifies which action to take on instance update with this disk. Default is to use the existing disk. + values: + - :RECREATE_DISK + - :RECREATE_DISK_IF_SOURCE_CHANGED + - :USE_EXISTING_DISK + - !ruby/object:Api::Type::String + name: 'provisionedIops' + description: | + Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. + - !ruby/object:Api::Type::Array + name: 'licenses' + description: | + A list of publicly visible licenses. Reserved for Google's use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'architecture' + description: | + The architecture of the attached disk. Valid values are arm64 or x86_64. + values: + - :ARCHITECTURE_UNSPECIFIED + - :ARM64 + - :X86_64 + - !ruby/object:Api::Type::NestedObject + name: 'resourceManagerTags' + description: | + Resource manager tags to be bound to the disk. 
Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'provisionedThroughput' + description: | + Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124. + - !ruby/object:Api::Type::Boolean + name: 'autoDelete' + description: | + Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). + - !ruby/object:Api::Type::Array + name: 'licenses' + description: | + [Output Only] Any valid publicly visible licenses. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'interface' + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. For most machine types, the default is SCSI. Local SSDs can use either NVME or SCSI. In certain configurations, persistent disks can use NVMe. For more information, see About persistent disks. + values: + - :NVME + - :SCSI + - !ruby/object:Api::Type::Array + name: 'guestOsFeatures' + description: | + A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The ID of a supported feature. To add multiple values, use commas to separate values. 
Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features. + values: + - :FEATURE_TYPE_UNSPECIFIED + - :GVNIC + - :MULTI_IP_SUBNET + - :SECURE_BOOT + - :SEV_CAPABLE + - :SEV_LIVE_MIGRATABLE + - :SEV_LIVE_MIGRATABLE_V2 + - :SEV_SNP_CAPABLE + - :UEFI_COMPATIBLE + - :VIRTIO_SCSI_MULTIQUEUE + - :WINDOWS + - !ruby/object:Api::Type::NestedObject + name: 'diskEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. 
For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + The size of the disk in GB. + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceInitialState' + description: | + Initial State for shielded instance, these are public keys which are safe to store in public + properties: + - !ruby/object:Api::Type::NestedObject + name: 'pk' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Array + name: 'keks' + description: | + The Key Exchange Key (KEK). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Array + name: 'dbs' + description: | + The Key Database (db). 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Array + name: 'dbxs' + description: | + The forbidden key database (dbx). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Boolean + name: 'forceAttach' + description: | + [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. + - !ruby/object:Api::Type::Enum + name: 'architecture' + description: | + [Output Only] The architecture of the attached disk. Valid values are ARM64 or X86_64. + values: + - :ARCHITECTURE_UNSPECIFIED + - :ARM64 + - :X86_64 + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + A metadata key/value entry. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#metadata for metadata. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. 
To see the latest fingerprint, make a get() request to retrieve the resource. + - !ruby/object:Api::Type::Array + name: 'items' + description: | + Array of key/value pairs. The total size of all keys and values must be less than 512 KB. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB). + - !ruby/object:Api::Type::Array + name: 'serviceAccounts' + description: | + A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'email' + description: | + Email address of the service account. + - !ruby/object:Api::Type::Array + name: 'scopes' + description: | + The list of scopes to be made available for this service account. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Sets the scheduling options for an Instance. + properties: + - !ruby/object:Api::Type::Enum + name: 'onHostMaintenance' + description: | + Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. 
For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Set VM host maintenance policy. + values: + - :MIGRATE + - :TERMINATE + - !ruby/object:Api::Type::Boolean + name: 'automaticRestart' + description: | + Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. + - !ruby/object:Api::Type::Array + name: 'nodeAffinities' + description: | + A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Corresponds to the label key of Node resource. + - !ruby/object:Api::Type::Enum + name: 'operator' + description: | + Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. + values: + - :IN + - :NOT_IN + - :OPERATOR_UNSPECIFIED + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Corresponds to the label values of Node resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'minNodeCpus' + description: | + The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. 
+ - !ruby/object:Api::Type::String + name: 'locationHint' + description: | + An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. + - !ruby/object:Api::Type::Enum + name: 'provisioningModel' + description: | + Specifies the provisioning model of the instance. + values: + - :SPOT + - :STANDARD + - !ruby/object:Api::Type::Enum + name: 'instanceTerminationAction' + description: | + Specifies the termination action for the instance. + values: + - :DELETE + - :INSTANCE_TERMINATION_ACTION_UNSPECIFIED + - :STOP + - !ruby/object:Api::Type::NestedObject + name: 'localSsdRecoveryTimeout' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to instances that are created from these properties. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'guestAccelerators' + description: | + A list of guest accelerator cards' type and count to use for instances created from these properties. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the guest accelerator cards exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. + - !ruby/object:Api::Type::NestedObject + name: 'reservationAffinity' + description: | + Specifies the reservations that this instance can consume from. + properties: + - !ruby/object:Api::Type::Enum + name: 'consumeReservationType' + description: | + Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved instances for examples. + values: + - :ANY_RESERVATION + - :NO_RESERVATION + - :SPECIFIC_RESERVATION + - :UNSPECIFIED + - !ruby/object:Api::Type::String + name: 'key' + description: | + Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Corresponds to the label values of a reservation resource. 
This can be either a name to a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceConfig' + description: | + A set of Shielded Instance options. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableSecureBoot' + description: | + Defines whether the instance has Secure Boot enabled. Disabled by default. + - !ruby/object:Api::Type::Boolean + name: 'enableVtpm' + description: | + Defines whether the instance has the vTPM enabled. Enabled by default. + - !ruby/object:Api::Type::Boolean + name: 'enableIntegrityMonitoring' + description: | + Defines whether the instance has integrity monitoring enabled. Enabled by default. + - !ruby/object:Api::Type::Array + name: 'resourcePolicies' + description: | + Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'confidentialInstanceConfig' + description: | + A set of Confidential Instance options. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableConfidentialCompute' + description: | + Defines whether the instance should have confidential compute enabled. + - !ruby/object:Api::Type::Enum + name: 'privateIpv6GoogleAccess' + description: | + The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. + values: + - :ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE + - :ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE + - :INHERIT_FROM_SUBNETWORK + - !ruby/object:Api::Type::NestedObject + name: 'advancedMachineFeatures' + description: | + Specifies options for controlling advanced machine features. 
Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled). + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableNestedVirtualization' + description: | + Whether to enable nested virtualization or not (default is false). + - !ruby/object:Api::Type::Integer + name: 'threadsPerCore' + description: | + The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. + - !ruby/object:Api::Type::Integer + name: 'visibleCoreCount' + description: | + The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width. + - !ruby/object:Api::Type::Boolean + name: 'enableUefiNetworking' + description: | + Whether to enable UEFI networking for instance creation. + - !ruby/object:Api::Type::NestedObject + name: 'networkPerformanceConfig' + description: | + + properties: + - !ruby/object:Api::Type::Enum + name: 'totalEgressBandwidthTier' + description: | + + values: + - :DEFAULT + - :TIER_1 + - !ruby/object:Api::Type::Enum + name: 'keyRevocationActionType' + description: | + KeyRevocationActionType of the instance. Supported options are "STOP" and "NONE". The default value is "NONE" if it is not specified. 
+ values: + - :KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED + - :NONE + - :STOP + - !ruby/object:Api::Type::Array + name: 'savedDisks' + description: | + An array of Machine Image specific properties for disks attached to the source instance + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#savedDisk for attached disks. + - !ruby/object:Api::Type::String + name: 'sourceDisk' + description: | + Specifies a URL of the disk attached to the source instance. + - !ruby/object:Api::Type::String + name: 'storageBytes' + description: | + [Output Only] Size of the individual disk snapshot used by this machine image. + - !ruby/object:Api::Type::Enum + name: 'storageBytesStatus' + description: | + [Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date. + values: + - :UPDATING + - :UP_TO_DATE + - !ruby/object:Api::Type::Enum + name: 'architecture' + description: | + [Output Only] The architecture of the attached disk. + values: + - :ARCHITECTURE_UNSPECIFIED + - :ARM64 + - :X86_64 + - !ruby/object:Api::Type::Array + name: 'storageLocations' + description: | + The regional or multi-regional Cloud Storage bucket location where the machine image is stored. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'machineImageEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. 
For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. 
For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::Boolean + name: 'guestFlush' + description: | + [Input Only] Whether to attempt an application consistent machine image by informing the OS to prepare for the snapshot process. + - !ruby/object:Api::Type::Array + name: 'sourceDiskEncryptionKeys' + description: | + [Input Only] The customer-supplied encryption key of the disks attached to the source instance. Required if the source disk is protected by a customer-supplied encryption key. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'sourceDisk' + description: | + URL of the disk attached to the source instance. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk + - !ruby/object:Api::Type::NestedObject + name: 'diskEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. 
For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'totalStorageBytes' + description: | + [Output Only] Total size of the storage used by the machine image. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + [Output Only] Reserved for future use. 
+ + + + + - !ruby/object:Api::Resource + name: MachineImage + base_url: 'projects/{{project}}/global/machineImages' + self_link: 'projects/{{project}}/global/machineImages/{{machineImage}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a machine image resource. A machine image is a Compute Engine resource that stores all the configuration, metadata, permissions, and data from one or more disks required to create a Virtual machine (VM) instance. For more information, see Machine images. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] The resource type, which is always compute#machineImage for machine image. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] A unique identifier for this machine image. The server defines this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] The creation timestamp for this machine image in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] The URL for this machine image. The server defines this URL. + - !ruby/object:Api::Type::String + name: 'sourceInstance' + description: | + The source instance used to create the machine image. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of the machine image. One of the following values: INVALID, CREATING, READY, DELETING, and UPLOADING. + values: + - :CREATING + - :DELETING + - :INVALID + - :READY + - :UPLOADING + - !ruby/object:Api::Type::NestedObject + name: 'sourceInstanceProperties' + description: | + DEPRECATED: Please use compute#instanceProperties instead. New properties will not be added to this field. + properties: + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional text description for the instances that are created from this machine image. + - !ruby/object:Api::Type::NestedObject + name: 'tags' + description: | + A set of instance tags. + properties: + - !ruby/object:Api::Type::Array + name: 'items' + description: | + An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the tags' contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update tags. You must always provide an up-to-date fingerprint hash in order to update or change tags. To see the latest fingerprint, make get() request to the instance. + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + The machine type to use for instances that are created from this machine image. + - !ruby/object:Api::Type::Boolean + name: 'canIpForward' + description: | + Enables instances created based on this machine image to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. + - !ruby/object:Api::Type::Array + name: 'networkInterfaces' + description: | + An array of network access configurations for this interface. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkInterface for network interfaces. + - !ruby/object:Api::Type::String + name: 'network' + description: | + URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. 
If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork + - !ruby/object:Api::Type::String + name: 'networkIP' + description: | + An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. + - !ruby/object:Api::Type::String + name: 'ipv6Address' + description: | + An IPv6 internal network address for this network interface. To use a static internal IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'internalIpv6PrefixLength' + description: | + The prefix length of the primary internal IPv6 range. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] The name of the network interface, which is generated by the server. For a VM, the network interface uses the nicN naming format. Where N is a value between 0 and 7. The default interface value is nic0. 
+ - !ruby/object:Api::Type::Array + name: 'accessConfigs' + description: | + An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. 
If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. + - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'ipv6AccessConfigs' + description: | + An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. 
If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. 
+ - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'aliasIpRanges' + description: | + An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipCidrRange' + description: | + The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. 
This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). + - !ruby/object:Api::Type::String + name: 'subnetworkRangeName' + description: | + The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + - !ruby/object:Api::Type::Enum + name: 'stackType' + description: | + The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. + values: + - :IPV4_IPV6 + - :IPV4_ONLY + - !ruby/object:Api::Type::Enum + name: 'ipv6AccessType' + description: | + [Output Only] One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. Valid only if stackType is IPV4_IPV6. + values: + - :EXTERNAL + - :INTERNAL + - !ruby/object:Api::Type::Integer + name: 'queueCount' + description: | + The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. + - !ruby/object:Api::Type::Enum + name: 'nicType' + description: | + The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. 
+ values: + - :GVNIC + - :UNSPECIFIED_NIC_TYPE + - :VIRTIO_NET + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. + - !ruby/object:Api::Type::Array + name: 'disks' + description: | + An array of disks that are associated with the instances that are created from this machine image. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#attachedDisk for attached disks. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Specifies the type of the attached disk, either SCRATCH or PERSISTENT. + values: + - :PERSISTENT + - :SCRATCH + - !ruby/object:Api::Type::Enum + name: 'mode' + description: | + The mode in which this disk is attached to the source instance, either READ_WRITE or READ_ONLY. + values: + - :READ_ONLY + - :READ_WRITE + - !ruby/object:Api::Type::String + name: 'source' + description: | + Specifies a URL of the disk attached to the source instance. + - !ruby/object:Api::Type::String + name: 'deviceName' + description: | + Specifies the name of the disk attached to the source instance. + - !ruby/object:Api::Type::Integer + name: 'index' + description: | + Specifies zero-based index of the disk that is attached to the source instance. + - !ruby/object:Api::Type::Boolean + name: 'boot' + description: | + Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. + - !ruby/object:Api::Type::Boolean + name: 'autoDelete' + description: | + Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). 
+ - !ruby/object:Api::Type::Array + name: 'licenses' + description: | + [Output Only] Any valid publicly visible licenses. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'interface' + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. + values: + - :NVME + - :SCSI + - !ruby/object:Api::Type::Array + name: 'guestOsFeatures' + description: | + A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features. + values: + - :FEATURE_TYPE_UNSPECIFIED + - :GVNIC + - :MULTI_IP_SUBNET + - :SECURE_BOOT + - :SEV_CAPABLE + - :SEV_LIVE_MIGRATABLE + - :SEV_LIVE_MIGRATABLE_V2 + - :SEV_SNP_CAPABLE + - :UEFI_COMPATIBLE + - :VIRTIO_SCSI_MULTIQUEUE + - :WINDOWS + - !ruby/object:Api::Type::NestedObject + name: 'diskEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. 
You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + The size of the disk in base-2 GB. + - !ruby/object:Api::Type::String + name: 'storageBytes' + description: | + [Output Only] A size of the storage used by the disk's snapshot by this machine image. 
+ - !ruby/object:Api::Type::Enum + name: 'storageBytesStatus' + description: | + [Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date. + values: + - :UPDATING + - :UP_TO_DATE + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + [Output Only] URL of the disk type resource. For example: projects/project /zones/zone/diskTypes/pd-standard or pd-ssd + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + A metadata key/value entry. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#metadata for metadata. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the resource. + - !ruby/object:Api::Type::Array + name: 'items' + description: | + Array of key/value pairs. The total size of all keys and values must be less than 512 KB. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. 
Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB). + - !ruby/object:Api::Type::Array + name: 'serviceAccounts' + description: | + A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this machine image. Use metadata queries to obtain the access tokens for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'email' + description: | + Email address of the service account. + - !ruby/object:Api::Type::Array + name: 'scopes' + description: | + The list of scopes to be made available for this service account. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Sets the scheduling options for an Instance. + properties: + - !ruby/object:Api::Type::Enum + name: 'onHostMaintenance' + description: | + Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Set VM host maintenance policy. + values: + - :MIGRATE + - :TERMINATE + - !ruby/object:Api::Type::Boolean + name: 'automaticRestart' + description: | + Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. 
By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. + - !ruby/object:Api::Type::Array + name: 'nodeAffinities' + description: | + A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Corresponds to the label key of Node resource. + - !ruby/object:Api::Type::Enum + name: 'operator' + description: | + Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. + values: + - :IN + - :NOT_IN + - :OPERATOR_UNSPECIFIED + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Corresponds to the label values of Node resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'minNodeCpus' + description: | + The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. + - !ruby/object:Api::Type::String + name: 'locationHint' + description: | + An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. + - !ruby/object:Api::Type::Enum + name: 'provisioningModel' + description: | + Specifies the provisioning model of the instance. + values: + - :SPOT + - :STANDARD + - !ruby/object:Api::Type::Enum + name: 'instanceTerminationAction' + description: | + Specifies the termination action for the instance. 
+ values: + - :DELETE + - :INSTANCE_TERMINATION_ACTION_UNSPECIFIED + - :STOP + - !ruby/object:Api::Type::NestedObject + name: 'localSsdRecoveryTimeout' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to instances that are created from this machine image. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'guestAccelerators' + description: | + A list of guest accelerator cards' type and count to use for instances created from this machine image. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. 
+ - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the guest accelerator cards exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Minimum cpu/platform to be used by instances created from this machine image. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. + - !ruby/object:Api::Type::Boolean + name: 'deletionProtection' + description: | + Whether the instance created from this machine image should be protected against deletion. + - !ruby/object:Api::Type::Enum + name: 'keyRevocationActionType' + description: | + KeyRevocationActionType of the instance. Supported options are "STOP" and "NONE". The default value is "NONE" if it is not specified. + values: + - :KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED + - :NONE + - :STOP + - !ruby/object:Api::Type::NestedObject + name: 'instanceProperties' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional text description for the instances that are created from these properties. + - !ruby/object:Api::Type::NestedObject + name: 'tags' + description: | + A set of instance tags. + properties: + - !ruby/object:Api::Type::Array + name: 'items' + description: | + An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the tags' contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update tags. 
You must always provide an up-to-date fingerprint hash in order to update or change tags. To see the latest fingerprint, make get() request to the instance. + - !ruby/object:Api::Type::NestedObject + name: 'resourceManagerTags' + description: | + Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + The machine type to use for instances that are created from these properties. + - !ruby/object:Api::Type::Boolean + name: 'canIpForward' + description: | + Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. + - !ruby/object:Api::Type::Array + name: 'networkInterfaces' + description: | + An array of network access configurations for this interface. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkInterface for network interfaces. + - !ruby/object:Api::Type::String + name: 'network' + description: | + URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. 
If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork + - !ruby/object:Api::Type::String + name: 'networkIP' + description: | + An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. + - !ruby/object:Api::Type::String + name: 'ipv6Address' + description: | + An IPv6 internal network address for this network interface. To use a static internal IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'internalIpv6PrefixLength' + description: | + The prefix length of the primary internal IPv6 range. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] The name of the network interface, which is generated by the server. For a VM, the network interface uses the nicN naming format. Where N is a value between 0 and 7. 
The default interface value is nic0. + - !ruby/object:Api::Type::Array + name: 'accessConfigs' + description: | + An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. 
If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. + - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'ipv6AccessConfigs' + description: | + An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. 
If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#accessConfig for access configs. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. + values: + - :DIRECT_IPV6 + - :ONE_TO_ONE_NAT + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommended name is External IPv6. + - !ruby/object:Api::Type::String + name: 'natIP' + description: | + Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. + - !ruby/object:Api::Type::String + name: 'externalIpv6' + description: | + Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. + - !ruby/object:Api::Type::Integer + name: 'externalIpv6PrefixLength' + description: | + Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range. 
+ - !ruby/object:Api::Type::Boolean + name: 'setPublicPtr' + description: | + Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. + - !ruby/object:Api::Type::String + name: 'publicPtrDomainName' + description: | + The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. + - !ruby/object:Api::Type::Enum + name: 'networkTier' + description: | + This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. + values: + - :FIXED_STANDARD + - :PREMIUM + - :STANDARD + - :STANDARD_OVERRIDES_FIXED_STANDARD + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + [Output Only] The resource URL for the security policy associated with this access config. + - !ruby/object:Api::Type::Array + name: 'aliasIpRanges' + description: | + An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipCidrRange' + description: | + The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. 
This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). + - !ruby/object:Api::Type::String + name: 'subnetworkRangeName' + description: | + The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date. + - !ruby/object:Api::Type::Enum + name: 'stackType' + description: | + The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations. + values: + - :IPV4_IPV6 + - :IPV4_ONLY + - !ruby/object:Api::Type::Enum + name: 'ipv6AccessType' + description: | + [Output Only] One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. Valid only if stackType is IPV4_IPV6. + values: + - :EXTERNAL + - :INTERNAL + - !ruby/object:Api::Type::Integer + name: 'queueCount' + description: | + The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. + - !ruby/object:Api::Type::Enum + name: 'nicType' + description: | + The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. 
+ values: + - :GVNIC + - :UNSPECIFIED_NIC_TYPE + - :VIRTIO_NET + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}. + - !ruby/object:Api::Type::Array + name: 'disks' + description: | + An array of disks that are associated with the instances that are created from these properties. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#attachedDisk for attached disks. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. + values: + - :PERSISTENT + - :SCRATCH + - !ruby/object:Api::Type::Enum + name: 'mode' + description: | + The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. + values: + - :READ_ONLY + - :READ_WRITE + - !ruby/object:Api::Type::Enum + name: 'savedState' + description: | + For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api. + values: + - :DISK_SAVED_STATE_UNSPECIFIED + - :PRESERVED + - !ruby/object:Api::Type::String + name: 'source' + description: | + Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. 
This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. + - !ruby/object:Api::Type::String + name: 'deviceName' + description: | + Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. + - !ruby/object:Api::Type::Integer + name: 'index' + description: | + [Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number. + - !ruby/object:Api::Type::Boolean + name: 'boot' + description: | + Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. + - !ruby/object:Api::Type::NestedObject + name: 'initializeParams' + description: | + [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This field is persisted and returned for instanceTemplate and not returned in the context of instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. + properties: + - !ruby/object:Api::Type::String + name: 'diskName' + description: | + Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. 
+ - !ruby/object:Api::Type::String + name: 'sourceImage' + description: | + The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. 
For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard. + - !ruby/object:Api::Type::NestedObject + name: 'sourceImageEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. 
For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualified key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'sourceSnapshot' + description: | + The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. + - !ruby/object:Api::Type::NestedObject + name: 'sourceSnapshotEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. 
For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualified key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. 
For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description. Provide this property when creating the disk. + - !ruby/object:Api::Type::Array + name: 'replicaZones' + description: | + Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'resourcePolicies' + description: | + Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'onUpdateAction' + description: | + Specifies which action to take on instance update with this disk. Default is to use the existing disk. + values: + - :RECREATE_DISK + - :RECREATE_DISK_IF_SOURCE_CHANGED + - :USE_EXISTING_DISK + - !ruby/object:Api::Type::String + name: 'provisionedIops' + description: | + Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. + - !ruby/object:Api::Type::Array + name: 'licenses' + description: | + A list of publicly visible licenses. Reserved for Google's use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'architecture' + description: | + The architecture of the attached disk. Valid values are arm64 or x86_64. + values: + - :ARCHITECTURE_UNSPECIFIED + - :ARM64 + - :X86_64 + - !ruby/object:Api::Type::NestedObject + name: 'resourceManagerTags' + description: | + Resource manager tags to be bound to the disk. 
Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'provisionedThroughput' + description: | + Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124. + - !ruby/object:Api::Type::Boolean + name: 'autoDelete' + description: | + Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). + - !ruby/object:Api::Type::Array + name: 'licenses' + description: | + [Output Only] Any valid publicly visible licenses. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'interface' + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. For most machine types, the default is SCSI. Local SSDs can use either NVME or SCSI. In certain configurations, persistent disks can use NVMe. For more information, see About persistent disks. + values: + - :NVME + - :SCSI + - !ruby/object:Api::Type::Array + name: 'guestOsFeatures' + description: | + A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The ID of a supported feature. To add multiple values, use commas to separate values. 
Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features. + values: + - :FEATURE_TYPE_UNSPECIFIED + - :GVNIC + - :MULTI_IP_SUBNET + - :SECURE_BOOT + - :SEV_CAPABLE + - :SEV_LIVE_MIGRATABLE + - :SEV_LIVE_MIGRATABLE_V2 + - :SEV_SNP_CAPABLE + - :UEFI_COMPATIBLE + - :VIRTIO_SCSI_MULTIQUEUE + - :WINDOWS + - !ruby/object:Api::Type::NestedObject + name: 'diskEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. 
For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualified key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + The size of the disk in GB. + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceInitialState' + description: | + Initial State for shielded instance, these are public keys which are safe to store in public + properties: + - !ruby/object:Api::Type::NestedObject + name: 'pk' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Array + name: 'keks' + description: | + The Key Exchange Key (KEK). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Array + name: 'dbs' + description: | + The Key Database (db). 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Array + name: 'dbxs' + description: | + The forbidden key database (dbx). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'content' + description: | + The raw content in the secure keys file. + - !ruby/object:Api::Type::Enum + name: 'fileType' + description: | + The file type of source file. + values: + - :BIN + - :UNDEFINED + - :X509 + - !ruby/object:Api::Type::Boolean + name: 'forceAttach' + description: | + [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. + - !ruby/object:Api::Type::Enum + name: 'architecture' + description: | + [Output Only] The architecture of the attached disk. Valid values are ARM64 or X86_64. + values: + - :ARCHITECTURE_UNSPECIFIED + - :ARM64 + - :X86_64 + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + A metadata key/value entry. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#metadata for metadata. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. 
To see the latest fingerprint, make a get() request to retrieve the resource. + - !ruby/object:Api::Type::Array + name: 'items' + description: | + Array of key/value pairs. The total size of all keys and values must be less than 512 KB. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB). + - !ruby/object:Api::Type::Array + name: 'serviceAccounts' + description: | + A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'email' + description: | + Email address of the service account. + - !ruby/object:Api::Type::Array + name: 'scopes' + description: | + The list of scopes to be made available for this service account. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Sets the scheduling options for an Instance. + properties: + - !ruby/object:Api::Type::Enum + name: 'onHostMaintenance' + description: | + Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. 
For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Set VM host maintenance policy. + values: + - :MIGRATE + - :TERMINATE + - !ruby/object:Api::Type::Boolean + name: 'automaticRestart' + description: | + Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. + - !ruby/object:Api::Type::Array + name: 'nodeAffinities' + description: | + A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Corresponds to the label key of Node resource. + - !ruby/object:Api::Type::Enum + name: 'operator' + description: | + Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. + values: + - :IN + - :NOT_IN + - :OPERATOR_UNSPECIFIED + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Corresponds to the label values of Node resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'minNodeCpus' + description: | + The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. 
+ - !ruby/object:Api::Type::String + name: 'locationHint' + description: | + An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. + - !ruby/object:Api::Type::Enum + name: 'provisioningModel' + description: | + Specifies the provisioning model of the instance. + values: + - :SPOT + - :STANDARD + - !ruby/object:Api::Type::Enum + name: 'instanceTerminationAction' + description: | + Specifies the termination action for the instance. + values: + - :DELETE + - :INSTANCE_TERMINATION_ACTION_UNSPECIFIED + - :STOP + - !ruby/object:Api::Type::NestedObject + name: 'localSsdRecoveryTimeout' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to instances that are created from these properties. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'guestAccelerators' + description: | + A list of guest accelerator cards' type and count to use for instances created from these properties. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the guest accelerator cards exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. + - !ruby/object:Api::Type::NestedObject + name: 'reservationAffinity' + description: | + Specifies the reservations that this instance can consume from. + properties: + - !ruby/object:Api::Type::Enum + name: 'consumeReservationType' + description: | + Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved instances for examples. + values: + - :ANY_RESERVATION + - :NO_RESERVATION + - :SPECIFIC_RESERVATION + - :UNSPECIFIED + - !ruby/object:Api::Type::String + name: 'key' + description: | + Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Corresponds to the label values of a reservation resource. 
This can be either a name to a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceConfig' + description: | + A set of Shielded Instance options. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableSecureBoot' + description: | + Defines whether the instance has Secure Boot enabled. Disabled by default. + - !ruby/object:Api::Type::Boolean + name: 'enableVtpm' + description: | + Defines whether the instance has the vTPM enabled. Enabled by default. + - !ruby/object:Api::Type::Boolean + name: 'enableIntegrityMonitoring' + description: | + Defines whether the instance has integrity monitoring enabled. Enabled by default. + - !ruby/object:Api::Type::Array + name: 'resourcePolicies' + description: | + Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'confidentialInstanceConfig' + description: | + A set of Confidential Instance options. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableConfidentialCompute' + description: | + Defines whether the instance should have confidential compute enabled. + - !ruby/object:Api::Type::Enum + name: 'privateIpv6GoogleAccess' + description: | + The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. + values: + - :ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE + - :ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE + - :INHERIT_FROM_SUBNETWORK + - !ruby/object:Api::Type::NestedObject + name: 'advancedMachineFeatures' + description: | + Specifies options for controlling advanced machine features. 
Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled). + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableNestedVirtualization' + description: | + Whether to enable nested virtualization or not (default is false). + - !ruby/object:Api::Type::Integer + name: 'threadsPerCore' + description: | + The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. + - !ruby/object:Api::Type::Integer + name: 'visibleCoreCount' + description: | + The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width. + - !ruby/object:Api::Type::Boolean + name: 'enableUefiNetworking' + description: | + Whether to enable UEFI networking for instance creation. + - !ruby/object:Api::Type::NestedObject + name: 'networkPerformanceConfig' + description: | + + properties: + - !ruby/object:Api::Type::Enum + name: 'totalEgressBandwidthTier' + description: | + + values: + - :DEFAULT + - :TIER_1 + - !ruby/object:Api::Type::Enum + name: 'keyRevocationActionType' + description: | + KeyRevocationActionType of the instance. Supported options are "STOP" and "NONE". The default value is "NONE" if it is not specified. 
+ values: + - :KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED + - :NONE + - :STOP + - !ruby/object:Api::Type::Array + name: 'savedDisks' + description: | + An array of Machine Image specific properties for disks attached to the source instance + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#savedDisk for attached disks. + - !ruby/object:Api::Type::String + name: 'sourceDisk' + description: | + Specifies a URL of the disk attached to the source instance. + - !ruby/object:Api::Type::String + name: 'storageBytes' + description: | + [Output Only] Size of the individual disk snapshot used by this machine image. + - !ruby/object:Api::Type::Enum + name: 'storageBytesStatus' + description: | + [Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date. + values: + - :UPDATING + - :UP_TO_DATE + - !ruby/object:Api::Type::Enum + name: 'architecture' + description: | + [Output Only] The architecture of the attached disk. + values: + - :ARCHITECTURE_UNSPECIFIED + - :ARM64 + - :X86_64 + - !ruby/object:Api::Type::Array + name: 'storageLocations' + description: | + The regional or multi-regional Cloud Storage bucket location where the machine image is stored. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'machineImageEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. 
For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. 
For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::Boolean + name: 'guestFlush' + description: | + [Input Only] Whether to attempt an application consistent machine image by informing the OS to prepare for the snapshot process. + - !ruby/object:Api::Type::Array + name: 'sourceDiskEncryptionKeys' + description: | + [Input Only] The customer-supplied encryption key of the disks attached to the source instance. Required if the source disk is protected by a customer-supplied encryption key. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'sourceDisk' + description: | + URL of the disk attached to the source instance. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk + - !ruby/object:Api::Type::NestedObject + name: 'diskEncryptionKey' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'rawKey' + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + - !ruby/object:Api::Type::String + name: 'rsaEncryptedKey' + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. 
For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 + - !ruby/object:Api::Type::String + name: 'sha256' + description: | + [Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + - !ruby/object:Api::Type::String + name: 'kmsKeyServiceAccount' + description: | + The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ + - !ruby/object:Api::Type::String + name: 'totalStorageBytes' + description: | + [Output Only] Total size of the storage used by the machine image. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + [Output Only] Reserved for future use. 
+ + + + + - !ruby/object:Api::Resource + name: NetworkEdgeSecurityService + self_link: 'projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices/{{networkEdgeSecurityService}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Google Cloud Armor network edge security service resource. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#networkEdgeSecurityService for NetworkEdgeSecurityServices + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. 
Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the resource resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a NetworkEdgeSecurityService. An up-to-date fingerprint must be provided in order to update the NetworkEdgeSecurityService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a NetworkEdgeSecurityService. + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + The resource URL for the network edge security service associated with this network edge security service. 
+ + + + + - !ruby/object:Api::Resource + name: NetworkEdgeSecurityService + self_link: 'projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices/{{networkEdgeSecurityService}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Google Cloud Armor network edge security service resource. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#networkEdgeSecurityService for NetworkEdgeSecurityServices + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. 
Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the resource resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a NetworkEdgeSecurityService. An up-to-date fingerprint must be provided in order to update the NetworkEdgeSecurityService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a NetworkEdgeSecurityService. + - !ruby/object:Api::Type::String + name: 'securityPolicy' + description: | + The resource URL for the network edge security service associated with this network edge security service. 
+ + + + + - !ruby/object:Api::Resource + name: NetworkAttachment + base_url: 'projects/{{project}}/regions/{{region}}/networkAttachments' + self_link: 'projects/{{project}}/regions/{{region}}/networkAttachments/{{networkAttachment}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + NetworkAttachments A network attachment resource ... + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource type. The server generates this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. 
+ - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource's resource id. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::Enum + name: 'connectionPreference' + description: | + + values: + - :ACCEPT_AUTOMATIC + - :ACCEPT_MANUAL + - :INVALID + - !ruby/object:Api::Type::Array + name: 'connectionEndpoints' + description: | + [Output Only] An array of connections for all the producers connected to this network attachment. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + The status of a connected endpoint to this network attachment. + values: + - :ACCEPTED + - :CLOSED + - :NEEDS_ATTENTION + - :PENDING + - :REJECTED + - :STATUS_UNSPECIFIED + - !ruby/object:Api::Type::String + name: 'projectIdOrNum' + description: | + The project id or number of the interface to which the IP was assigned. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + The subnetwork used to assign the IP to the producer instance network interface. + - !ruby/object:Api::Type::String + name: 'ipAddress' + description: | + The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless. + - !ruby/object:Api::Type::String + name: 'ipv6Address' + description: | + The IPv6 address assigned to the producer instance network interface. This is only assigned when the stack types of both the instance network interface and the consumer subnet are IPv4_IPv6. 
+ - !ruby/object:Api::Type::Array + name: 'secondaryIpCidrRanges' + description: | + Alias IP ranges from the same subnetwork. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'subnetworkCidrRange' + description: | + [Output Only] The CIDR range of the subnet from which the IPv4 internal IP was allocated from. + - !ruby/object:Api::Type::Array + name: 'subnetworks' + description: | + An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'producerRejectLists' + description: | + Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'producerAcceptLists' + description: | + Projects that are allowed to connect to this network attachment. The project can be specified using its id or number. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch. + - !ruby/object:Api::Type::String + name: 'network' + description: | + [Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks. 
+ + + + + - !ruby/object:Api::Resource + name: NetworkAttachment + base_url: 'projects/{{project}}/regions/{{region}}/networkAttachments' + self_link: 'projects/{{project}}/regions/{{region}}/networkAttachments/{{networkAttachment}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + NetworkAttachments A network attachment resource ... + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource type. The server generates this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. 
+ - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource's resource id. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::Enum + name: 'connectionPreference' + description: | + + values: + - :ACCEPT_AUTOMATIC + - :ACCEPT_MANUAL + - :INVALID + - !ruby/object:Api::Type::Array + name: 'connectionEndpoints' + description: | + [Output Only] An array of connections for all the producers connected to this network attachment. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + The status of a connected endpoint to this network attachment. + values: + - :ACCEPTED + - :CLOSED + - :NEEDS_ATTENTION + - :PENDING + - :REJECTED + - :STATUS_UNSPECIFIED + - !ruby/object:Api::Type::String + name: 'projectIdOrNum' + description: | + The project id or number of the interface to which the IP was assigned. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + The subnetwork used to assign the IP to the producer instance network interface. + - !ruby/object:Api::Type::String + name: 'ipAddress' + description: | + The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless. + - !ruby/object:Api::Type::String + name: 'ipv6Address' + description: | + The IPv6 address assigned to the producer instance network interface. This is only assigned when the stack types of both the instance network interface and the consumer subnet are IPv4_IPv6. 
+ - !ruby/object:Api::Type::Array + name: 'secondaryIpCidrRanges' + description: | + Alias IP ranges from the same subnetwork. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'subnetworkCidrRange' + description: | + [Output Only] The CIDR range of the subnet from which the IPv4 internal IP was allocated from. + - !ruby/object:Api::Type::Array + name: 'subnetworks' + description: | + An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'producerRejectLists' + description: | + Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'producerAcceptLists' + description: | + Projects that are allowed to connect to this network attachment. The project can be specified using its id or number. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch. + - !ruby/object:Api::Type::String + name: 'network' + description: | + [Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks. 
+ + + + + - !ruby/object:Api::Resource + name: RegionNetworkEndpointGroup + base_url: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups' + self_link: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{networkEndpointGroup}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a collection of network endpoints. A network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs for different use cases, see Network endpoint groups overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Enum + name: 'networkEndpointType' + description: | + Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT. + values: + - :GCE_VM_IP + - :GCE_VM_IP_PORT + - :INTERNET_FQDN_PORT + - :INTERNET_IP_PORT + - :NON_GCP_PRIVATE_IP_PORT + - :PRIVATE_SERVICE_CONNECT + - :SERVERLESS + - !ruby/object:Api::Type::Integer + name: 'size' + description: | + [Output only] Number of network endpoints in the network endpoint group. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] The URL of the region where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The URL of the zone where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'network' + description: | + The URL of the network to which all network endpoints in the NEG belong. Uses "default" project network if unspecified. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional URL of the subnetwork to which all network endpoints in the NEG belong. + - !ruby/object:Api::Type::Integer + name: 'defaultPort' + description: | + The default port used if the port number is not specified in the network endpoint. 
+ - !ruby/object:Api::Type::NestedObject + name: 'annotations' + description: | + Metadata defined as annotations on the network endpoint group. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'cloudRun' + description: | + Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". + - !ruby/object:Api::Type::String + name: 'tag' + description: | + Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'appEngine' + description: | + Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present.
Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". + - !ruby/object:Api::Type::String + name: 'version' + description: | + Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'cloudFunction' + description: | + Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'function' + description: | + A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. 
For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. + - !ruby/object:Api::Type::String + name: 'pscTargetService' + description: | + The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. An example value is: "asia-northeast3-cloudkms.googleapis.com" + - !ruby/object:Api::Type::NestedObject + name: 'pscData' + description: | + All data that is specifically relevant to only network endpoint groups of type PRIVATE_SERVICE_CONNECT. + properties: + - !ruby/object:Api::Type::String + name: 'consumerPscAddress' + description: | + [Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB. + - !ruby/object:Api::Type::String + name: 'pscConnectionId' + description: | + [Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer. + - !ruby/object:Api::Type::Enum + name: 'pscConnectionStatus' + description: | + [Output Only] The connection status of the PSC Forwarding Rule. 
+ values: + - :ACCEPTED + - :CLOSED + - :NEEDS_ATTENTION + - :PENDING + - :REJECTED + - :STATUS_UNSPECIFIED + + + + + - !ruby/object:Api::Resource + name: RegionNetworkEndpointGroup + base_url: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups' + self_link: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{networkEndpointGroup}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a collection of network endpoints. A network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs for different use cases, see Network endpoint groups overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Enum + name: 'networkEndpointType' + description: | + Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT. + values: + - :GCE_VM_IP + - :GCE_VM_IP_PORT + - :INTERNET_FQDN_PORT + - :INTERNET_IP_PORT + - :NON_GCP_PRIVATE_IP_PORT + - :PRIVATE_SERVICE_CONNECT + - :SERVERLESS + - !ruby/object:Api::Type::Integer + name: 'size' + description: | + [Output only] Number of network endpoints in the network endpoint group. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] The URL of the region where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The URL of the zone where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'network' + description: | + The URL of the network to which all network endpoints in the NEG belong. Uses "default" project network if unspecified. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional URL of the subnetwork to which all network endpoints in the NEG belong. + - !ruby/object:Api::Type::Integer + name: 'defaultPort' + description: | + The default port used if the port number is not specified in the network endpoint. 
+ - !ruby/object:Api::Type::NestedObject + name: 'annotations' + description: | + Metadata defined as annotations on the network endpoint group. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'cloudRun' + description: | + Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". + - !ruby/object:Api::Type::String + name: 'tag' + description: | + Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'appEngine' + description: | + Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present.
Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". + - !ruby/object:Api::Type::String + name: 'version' + description: | + Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'cloudFunction' + description: | + Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'function' + description: | + A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. 
For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. + - !ruby/object:Api::Type::String + name: 'pscTargetService' + description: | + The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. An example value is: "asia-northeast3-cloudkms.googleapis.com" + - !ruby/object:Api::Type::NestedObject + name: 'pscData' + description: | + All data that is specifically relevant to only network endpoint groups of type PRIVATE_SERVICE_CONNECT. + properties: + - !ruby/object:Api::Type::String + name: 'consumerPscAddress' + description: | + [Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB. + - !ruby/object:Api::Type::String + name: 'pscConnectionId' + description: | + [Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer. + - !ruby/object:Api::Type::Enum + name: 'pscConnectionStatus' + description: | + [Output Only] The connection status of the PSC Forwarding Rule. 
+ values: + - :ACCEPTED + - :CLOSED + - :NEEDS_ATTENTION + - :PENDING + - :REJECTED + - :STATUS_UNSPECIFIED + + + + + - !ruby/object:Api::Resource + name: GlobalNetworkEndpointGroup + base_url: 'projects/{{project}}/global/networkEndpointGroups' + self_link: 'projects/{{project}}/global/networkEndpointGroups/{{networkEndpointGroup}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a collection of network endpoints. A network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs for different use cases, see Network endpoint groups overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Enum + name: 'networkEndpointType' + description: | + Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT. + values: + - :GCE_VM_IP + - :GCE_VM_IP_PORT + - :INTERNET_FQDN_PORT + - :INTERNET_IP_PORT + - :NON_GCP_PRIVATE_IP_PORT + - :PRIVATE_SERVICE_CONNECT + - :SERVERLESS + - !ruby/object:Api::Type::Integer + name: 'size' + description: | + [Output only] Number of network endpoints in the network endpoint group. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] The URL of the region where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The URL of the zone where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'network' + description: | + The URL of the network to which all network endpoints in the NEG belong. Uses "default" project network if unspecified. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional URL of the subnetwork to which all network endpoints in the NEG belong. + - !ruby/object:Api::Type::Integer + name: 'defaultPort' + description: | + The default port used if the port number is not specified in the network endpoint. 
+ - !ruby/object:Api::Type::NestedObject + name: 'annotations' + description: | + Metadata defined as annotations on the network endpoint group. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'cloudRun' + description: | + Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". + - !ruby/object:Api::Type::String + name: 'tag' + description: | + Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'appEngine' + description: | + Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present.
Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". + - !ruby/object:Api::Type::String + name: 'version' + description: | + Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'cloudFunction' + description: | + Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'function' + description: | + A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. 
For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. + - !ruby/object:Api::Type::String + name: 'pscTargetService' + description: | + The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. An example value is: "asia-northeast3-cloudkms.googleapis.com" + - !ruby/object:Api::Type::NestedObject + name: 'pscData' + description: | + All data that is specifically relevant to only network endpoint groups of type PRIVATE_SERVICE_CONNECT. + properties: + - !ruby/object:Api::Type::String + name: 'consumerPscAddress' + description: | + [Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB. + - !ruby/object:Api::Type::String + name: 'pscConnectionId' + description: | + [Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer. + - !ruby/object:Api::Type::Enum + name: 'pscConnectionStatus' + description: | + [Output Only] The connection status of the PSC Forwarding Rule. 
+ values: + - :ACCEPTED + - :CLOSED + - :NEEDS_ATTENTION + - :PENDING + - :REJECTED + - :STATUS_UNSPECIFIED + + + + + - !ruby/object:Api::Resource + name: GlobalNetworkEndpointGroup + base_url: 'projects/{{project}}/global/networkEndpointGroups' + self_link: 'projects/{{project}}/global/networkEndpointGroups/{{networkEndpointGroup}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a collection of network endpoints. A network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs for different use cases, see Network endpoint groups overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Enum + name: 'networkEndpointType' + description: | + Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT. + values: + - :GCE_VM_IP + - :GCE_VM_IP_PORT + - :INTERNET_FQDN_PORT + - :INTERNET_IP_PORT + - :NON_GCP_PRIVATE_IP_PORT + - :PRIVATE_SERVICE_CONNECT + - :SERVERLESS + - !ruby/object:Api::Type::Integer + name: 'size' + description: | + [Output only] Number of network endpoints in the network endpoint group. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] The URL of the region where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The URL of the zone where the network endpoint group is located. + - !ruby/object:Api::Type::String + name: 'network' + description: | + The URL of the network to which all network endpoints in the NEG belong. Uses "default" project network if unspecified. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional URL of the subnetwork to which all network endpoints in the NEG belong. + - !ruby/object:Api::Type::Integer + name: 'defaultPort' + description: | + The default port used if the port number is not specified in the network endpoint. 
+ - !ruby/object:Api::Type::NestedObject + name: 'annotations' + description: | + Metadata defined as annotations on the network endpoint group. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'cloudRun' + description: | + Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". + - !ruby/object:Api::Type::String + name: 'tag' + description: | + Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'appEngine' + description: | + Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present.
Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'service' + description: | + Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". + - !ruby/object:Api::Type::String + name: 'version' + description: | + Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. + - !ruby/object:Api::Type::NestedObject + name: 'cloudFunction' + description: | + Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. + properties: + - !ruby/object:Api::Type::String + name: 'function' + description: | + A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". + - !ruby/object:Api::Type::String + name: 'urlMask' + description: | + A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. 
For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/<function>". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively.
+ values: + - :ACCEPTED + - :CLOSED + - :NEEDS_ATTENTION + - :PENDING + - :REJECTED + - :STATUS_UNSPECIFIED + + + + + - !ruby/object:Api::Resource + name: NodeType + base_url: 'projects/{{project}}/zones/{{zone}}/nodeTypes' + self_link: 'projects/{{project}}/zones/{{zone}}/nodeTypes/{{nodeType}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute_v1/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represent a sole-tenant Node Type resource. Each node within a node group must have a node type. A node type specifies the total amount of cores and memory for that node. Currently, the only available node type is n1-node-96-624 node type that has 96 vCPUs and 624 GB of memory, available in multiple zones. For more information read Node types. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] The type of the resource. Always compute#nodeType for node types. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] Name of the resource. + - !ruby/object:Api::Type::String + name: 'description' + description: | + [Output Only] An optional textual description of the resource. 
+ - !ruby/object:Api::Type::String + name: 'cpuPlatform' + description: | + [Output Only] The CPU platform used by this node type. + - !ruby/object:Api::Type::Integer + name: 'guestCpus' + description: | + [Output Only] The number of virtual CPUs that are available to the node type. + - !ruby/object:Api::Type::Integer + name: 'memoryMb' + description: | + [Output Only] The amount of physical memory available to the node type, defined in MB. + - !ruby/object:Api::Type::Integer + name: 'localSsdGb' + description: | + [Output Only] Local SSD available to the node type, defined in GB. + - !ruby/object:Api::Type::NestedObject + name: 'deprecated' + description: | + Deprecation status for a public resource. + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. + values: + - :ACTIVE + - :DELETED + - :DEPRECATED + - :OBSOLETE + - !ruby/object:Api::Type::String + name: 'replacement' + description: | + The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. + - !ruby/object:Api::Type::String + name: 'deprecated' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. 
+ - !ruby/object:Api::Type::String + name: 'obsolete' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'deleted' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] The name of the zone where the node type resides, such as us-central1-a. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + + + + + - !ruby/object:Api::Resource + name: RegionDiskType + base_url: 'projects/{{project}}/regions/{{region}}/diskTypes' + self_link: 'projects/{{project}}/regions/{{region}}/diskTypes/{{diskType}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. 
The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#diskType for disk types. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] Name of the resource. + - !ruby/object:Api::Type::String + name: 'description' + description: | + [Output Only] An optional description of this resource. + - !ruby/object:Api::Type::String + name: 'validDiskSize' + description: | + [Output Only] An optional textual description of the valid disk size, such as "10GB-10TB". + - !ruby/object:Api::Type::NestedObject + name: 'deprecated' + description: | + Deprecation status for a public resource. + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. + values: + - :ACTIVE + - :DELETED + - :DEPRECATED + - :OBSOLETE + - !ruby/object:Api::Type::String + name: 'replacement' + description: | + The URL of the suggested replacement for a deprecated resource. 
The suggested replacement resource must be the same kind of resource as the deprecated resource. + - !ruby/object:Api::Type::String + name: 'deprecated' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'obsolete' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'deleted' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] URL of the zone where the disk type resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'defaultDiskSizeGb' + description: | + [Output Only] Server-defined default disk size in GB. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the disk type resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. 
+ + + + + - !ruby/object:Api::Resource + name: RegionDiskType + base_url: 'projects/{{project}}/regions/{{region}}/diskTypes' + self_link: 'projects/{{project}}/regions/{{region}}/diskTypes/{{diskType}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#diskType for disk types. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + [Output Only] Name of the resource. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + [Output Only] An optional description of this resource. + - !ruby/object:Api::Type::String + name: 'validDiskSize' + description: | + [Output Only] An optional textual description of the valid disk size, such as "10GB-10TB". + - !ruby/object:Api::Type::NestedObject + name: 'deprecated' + description: | + Deprecation status for a public resource. + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. + values: + - :ACTIVE + - :DELETED + - :DEPRECATED + - :OBSOLETE + - !ruby/object:Api::Type::String + name: 'replacement' + description: | + The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. + - !ruby/object:Api::Type::String + name: 'deprecated' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'obsolete' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. 
+ - !ruby/object:Api::Type::String + name: 'deleted' + description: | + An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] URL of the zone where the disk type resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'defaultDiskSizeGb' + description: | + [Output Only] Server-defined default disk size in GB. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the disk type resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + + + + + - !ruby/object:Api::Resource + name: PacketMirroring + base_url: 'projects/{{project}}/regions/{{region}}/packetMirrorings' + self_link: 'projects/{{project}}/regions/{{region}}/packetMirrorings/{{packetMirroring}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Packet Mirroring resource. 
Packet Mirroring clones the traffic of specified instances in your Virtual Private Cloud (VPC) network and forwards it to a collector destination, such as an instance group of an internal TCP/UDP load balancer, for analysis or examination. For more information about setting up Packet Mirroring, see Using Packet Mirroring. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#packetMirroring for packet mirrorings. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URI of the region where the packetMirroring resides. 
+ - !ruby/object:Api::Type::NestedObject + name: 'network' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + URL of the network resource. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the network; defined by the server. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + The priority of applying this configuration. Priority is used to break ties in cases where there is more than one matching rule. In the case of two rules that apply for a given Instance, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535. + - !ruby/object:Api::Type::NestedObject + name: 'collectorIlb' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the forwarding rule; defined by the server. + - !ruby/object:Api::Type::NestedObject + name: 'mirroredResources' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'subnetworks' + description: | + A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the subnetwork; defined by the server. 
+ - !ruby/object:Api::Type::Array + name: 'instances' + description: | + A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + Resource URL to the virtual machine instance which is being mirrored. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the instance; defined by the server. + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'filter' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'cidrRanges' + description: | + One or more IPv4 or IPv6 CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. If no ranges are specified, all IPv4 traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all IPv4 traffic is mirrored. To mirror all IPv4 and IPv6 traffic, use "0.0.0.0/0,::/0". Note: Support for IPv6 traffic is in preview. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'IPProtocols' + description: | + Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all IPv4 traffic is mirrored. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'direction' + description: | + Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. + values: + - :BOTH + - :EGRESS + - :INGRESS + - !ruby/object:Api::Type::Enum + name: 'enable' + description: | + Indicates whether or not this packet mirroring takes effect. If set to FALSE, this packet mirroring policy will not be enforced on the network. The default is TRUE. + values: + - :FALSE + - :TRUE + + + + + - !ruby/object:Api::Resource + name: PacketMirroring + base_url: 'projects/{{project}}/regions/{{region}}/packetMirrorings' + self_link: 'projects/{{project}}/regions/{{region}}/packetMirrorings/{{packetMirroring}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Packet Mirroring resource. Packet Mirroring clones the traffic of specified instances in your Virtual Private Cloud (VPC) network and forwards it to a collector destination, such as an instance group of an internal TCP/UDP load balancer, for analysis or examination. For more information about setting up Packet Mirroring, see Using Packet Mirroring. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#packetMirroring for packet mirrorings. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. 
This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URI of the region where the packetMirroring resides. + - !ruby/object:Api::Type::NestedObject + name: 'network' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + URL of the network resource. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the network; defined by the server. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + The priority of applying this configuration. Priority is used to break ties in cases where there is more than one matching rule. In the case of two rules that apply for a given Instance, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535. 
+ - !ruby/object:Api::Type::NestedObject + name: 'collectorIlb' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the forwarding rule; defined by the server. + - !ruby/object:Api::Type::NestedObject + name: 'mirroredResources' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'subnetworks' + description: | + A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the subnetwork; defined by the server. + - !ruby/object:Api::Type::Array + name: 'instances' + description: | + A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'url' + description: | + Resource URL to the virtual machine instance which is being mirrored. + - !ruby/object:Api::Type::String + name: 'canonicalUrl' + description: | + [Output Only] Unique identifier for the instance; defined by the server. 
+ - !ruby/object:Api::Type::Array + name: 'tags' + description: | + A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'filter' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'cidrRanges' + description: | + One or more IPv4 or IPv6 CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. If no ranges are specified, all IPv4 traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all IPv4 traffic is mirrored. To mirror all IPv4 and IPv6 traffic, use "0.0.0.0/0,::/0". Note: Support for IPv6 traffic is in preview. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'IPProtocols' + description: | + Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all IPv4 traffic is mirrored. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'direction' + description: | + Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. + values: + - :BOTH + - :EGRESS + - :INGRESS + - !ruby/object:Api::Type::Enum + name: 'enable' + description: | + Indicates whether or not this packet mirroring takes effect. If set to FALSE, this packet mirroring policy will not be enforced on the network. The default is TRUE. 
+ values: + - :FALSE + - :TRUE + + + + + - !ruby/object:Api::Resource + name: TargetVpnGateway + base_url: 'projects/{{project}}/regions/{{region}}/targetVpnGateways' + self_link: 'projects/{{project}}/regions/{{region}}/targetVpnGateways/{{targetVpnGateway}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Target VPN Gateway resource. The target VPN gateway resource represents a Classic Cloud VPN gateway. For more information, read the the Cloud VPN Overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the target VPN gateway resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::String + name: 'network' + description: | + URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created. + - !ruby/object:Api::Type::Array + name: 'tunnels' + description: | + [Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using the compute.vpntunnels.insert method and associated with a VPN gateway. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of the VPN gateway, which can be one of the following: CREATING, READY, FAILED, or DELETING. + values: + - :CREATING + - :DELETING + - :FAILED + - :READY + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::Array + name: 'forwardingRules' + description: | + [Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a TargetVpnGateway. + + + + + - !ruby/object:Api::Resource + name: RegionSslPolicy + base_url: 'projects/{{project}}/regions/{{region}}/sslPolicies' + self_link: 'projects/{{project}}/regions/{{region}}/sslPolicies/{{sslPolicy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#sslPolicyfor SSL policies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. 
This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Enum + name: 'profile' + description: | + Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field. + values: + - :COMPATIBLE + - :CUSTOM + - :MODERN + - :RESTRICTED + - !ruby/object:Api::Type::Enum + name: 'minTlsVersion' + description: | + The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2. + values: + - :TLS_1_0 + - :TLS_1_1 + - :TLS_1_2 + - !ruby/object:Api::Type::Array + name: 'enabledFeatures' + description: | + [Output Only] The list of features enabled in the SSL policy. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'customFeatures' + description: | + A list of features enabled when the selected profile is CUSTOM. The method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an SslPolicy. + - !ruby/object:Api::Type::Array + name: 'warnings' + description: | + [Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'code' + description: | + [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response. 
+ values: + - :CLEANUP_FAILED + - :DEPRECATED_RESOURCE_USED + - :DEPRECATED_TYPE_USED + - :DISK_SIZE_LARGER_THAN_IMAGE_SIZE + - :EXPERIMENTAL_TYPE_USED + - :EXTERNAL_API_WARNING + - :FIELD_VALUE_OVERRIDEN + - :INJECTED_KERNELS_DEPRECATED + - :INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB + - :LARGE_DEPLOYMENT_WARNING + - :LIST_OVERHEAD_QUOTA_EXCEED + - :MISSING_TYPE_DEPENDENCY + - :NEXT_HOP_ADDRESS_NOT_ASSIGNED + - :NEXT_HOP_CANNOT_IP_FORWARD + - :NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE + - :NEXT_HOP_INSTANCE_NOT_FOUND + - :NEXT_HOP_INSTANCE_NOT_ON_NETWORK + - :NEXT_HOP_NOT_RUNNING + - :NOT_CRITICAL_ERROR + - :NO_RESULTS_ON_PAGE + - :PARTIAL_SUCCESS + - :REQUIRED_TOS_AGREEMENT + - :RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING + - :RESOURCE_NOT_DELETED + - :SCHEMA_VALIDATION_IGNORED + - :SINGLE_INSTANCE_PROPERTY_TEMPLATE + - :UNDECLARED_PROPERTIES + - :UNREACHABLE + - !ruby/object:Api::Type::String + name: 'message' + description: | + [Output Only] A human-readable description of the warning code. + - !ruby/object:Api::Type::Array + name: 'data' + description: | + [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" } + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). + - !ruby/object:Api::Type::String + name: 'value' + description: | + [Output Only] A warning data value corresponding to the key. 
+ - !ruby/object:Api::Type::NestedObject + name: 'tlsSettings' + description: | + The TLS settings for the server. + properties: + - !ruby/object:Api::Type::Enum + name: 'tlsMode' + description: | + Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: - SIMPLE Secure connections with standard TLS semantics. - MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication. + values: + - :INVALID + - :MUTUAL + - :SIMPLE + - !ruby/object:Api::Type::NestedObject + name: 'proxyTlsContext' + description: | + [Deprecated] The TLS settings for the client or server. The TLS settings for the client or server. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'certificateContext' + description: | + [Deprecated] Defines the mechanism to obtain the client or server certificate. Defines the mechanism to obtain the client or server certificate. + properties: + - !ruby/object:Api::Type::Enum + name: 'certificateSource' + description: | + Defines how TLS certificates are obtained. + values: + - :INVALID + - :USE_PATH + - :USE_SDS + - !ruby/object:Api::Type::NestedObject + name: 'certificatePaths' + description: | + [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. + properties: + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the client or server TLS certificate to use. + - !ruby/object:Api::Type::String + name: 'privateKeyPath' + description: | + The path to the file holding the client or server private key. + - !ruby/object:Api::Type::NestedObject + name: 'sdsConfig' + description: | + [Deprecated] The configuration to access the SDS server. The configuration to access the SDS server. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'grpcServiceConfig' + description: | + [Deprecated] gRPC config to access the SDS server. gRPC config to access the SDS server. + properties: + - !ruby/object:Api::Type::String + name: 'targetUri' + description: | + The target URI of the SDS server. + - !ruby/object:Api::Type::NestedObject + name: 'channelCredentials' + description: | + [Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'channelCredentialType' + description: | + The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. + values: + - :CERTIFICATES + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'certificates' + description: | + [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. + properties: + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the client or server TLS certificate to use. + - !ruby/object:Api::Type::String + name: 'privateKeyPath' + description: | + The path to the file holding the client or server private key. + - !ruby/object:Api::Type::NestedObject + name: 'callCredentials' + description: | + [Deprecated] gRPC call credentials to access the SDS server. gRPC call credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'callCredentialType' + description: | + The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. 
+ values: + - :FROM_PLUGIN + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'fromPlugin' + description: | + [Deprecated] Custom authenticator credentials. Custom authenticator credentials. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Plugin name. + - !ruby/object:Api::Type::String + name: 'structConfig' + description: | + A text proto that conforms to a Struct type definition interpreted by the plugin. + - !ruby/object:Api::Type::NestedObject + name: 'validationContext' + description: | + [Deprecated] Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. validate the client/server certificate. + properties: + - !ruby/object:Api::Type::Enum + name: 'validationSource' + description: | + Defines how TLS certificates are obtained. + values: + - :INVALID + - :USE_PATH + - :USE_SDS + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the CA certificate to validate the client or server certificate. + - !ruby/object:Api::Type::NestedObject + name: 'sdsConfig' + description: | + [Deprecated] The configuration to access the SDS server. The configuration to access the SDS server. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'grpcServiceConfig' + description: | + [Deprecated] gRPC config to access the SDS server. gRPC config to access the SDS server. + properties: + - !ruby/object:Api::Type::String + name: 'targetUri' + description: | + The target URI of the SDS server. + - !ruby/object:Api::Type::NestedObject + name: 'channelCredentials' + description: | + [Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'channelCredentialType' + description: | + The channel credentials to access the SDS server. 
This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. + values: + - :CERTIFICATES + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'certificates' + description: | + [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. + properties: + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the client or server TLS certificate to use. + - !ruby/object:Api::Type::String + name: 'privateKeyPath' + description: | + The path to the file holding the client or server private key. + - !ruby/object:Api::Type::NestedObject + name: 'callCredentials' + description: | + [Deprecated] gRPC call credentials to access the SDS server. gRPC call credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'callCredentialType' + description: | + The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. + values: + - :FROM_PLUGIN + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'fromPlugin' + description: | + [Deprecated] Custom authenticator credentials. Custom authenticator credentials. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Plugin name. + - !ruby/object:Api::Type::String + name: 'structConfig' + description: | + A text proto that conforms to a Struct type definition interpreted by the plugin. + - !ruby/object:Api::Type::Array + name: 'subjectAltNames' + description: | + A list of alternate names to verify the subject identity in the certificate presented by the client. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies. + + + + + - !ruby/object:Api::Resource + name: RegionSslPolicy + base_url: 'projects/{{project}}/regions/{{region}}/sslPolicies' + self_link: 'projects/{{project}}/regions/{{region}}/sslPolicies/{{sslPolicy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#sslPolicyfor SSL policies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. 
+ - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Enum + name: 'profile' + description: | + Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field. + values: + - :COMPATIBLE + - :CUSTOM + - :MODERN + - :RESTRICTED + - !ruby/object:Api::Type::Enum + name: 'minTlsVersion' + description: | + The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2. + values: + - :TLS_1_0 + - :TLS_1_1 + - :TLS_1_2 + - !ruby/object:Api::Type::Array + name: 'enabledFeatures' + description: | + [Output Only] The list of features enabled in the SSL policy. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'customFeatures' + description: | + A list of features enabled when the selected profile is CUSTOM. The method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an SslPolicy. + - !ruby/object:Api::Type::Array + name: 'warnings' + description: | + [Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'code' + description: | + [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response. + values: + - :CLEANUP_FAILED + - :DEPRECATED_RESOURCE_USED + - :DEPRECATED_TYPE_USED + - :DISK_SIZE_LARGER_THAN_IMAGE_SIZE + - :EXPERIMENTAL_TYPE_USED + - :EXTERNAL_API_WARNING + - :FIELD_VALUE_OVERRIDEN + - :INJECTED_KERNELS_DEPRECATED + - :INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB + - :LARGE_DEPLOYMENT_WARNING + - :LIST_OVERHEAD_QUOTA_EXCEED + - :MISSING_TYPE_DEPENDENCY + - :NEXT_HOP_ADDRESS_NOT_ASSIGNED + - :NEXT_HOP_CANNOT_IP_FORWARD + - :NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE + - :NEXT_HOP_INSTANCE_NOT_FOUND + - :NEXT_HOP_INSTANCE_NOT_ON_NETWORK + - :NEXT_HOP_NOT_RUNNING + - :NOT_CRITICAL_ERROR + - :NO_RESULTS_ON_PAGE + - :PARTIAL_SUCCESS + - :REQUIRED_TOS_AGREEMENT + - :RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING + - :RESOURCE_NOT_DELETED + - :SCHEMA_VALIDATION_IGNORED + - :SINGLE_INSTANCE_PROPERTY_TEMPLATE + - :UNDECLARED_PROPERTIES + - :UNREACHABLE + - !ruby/object:Api::Type::String + name: 'message' + description: | + [Output Only] A human-readable description of the warning code. 
+ - !ruby/object:Api::Type::Array + name: 'data' + description: | + [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" } + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). + - !ruby/object:Api::Type::String + name: 'value' + description: | + [Output Only] A warning data value corresponding to the key. + - !ruby/object:Api::Type::NestedObject + name: 'tlsSettings' + description: | + The TLS settings for the server. + properties: + - !ruby/object:Api::Type::Enum + name: 'tlsMode' + description: | + Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: - SIMPLE Secure connections with standard TLS semantics. - MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication. + values: + - :INVALID + - :MUTUAL + - :SIMPLE + - !ruby/object:Api::Type::NestedObject + name: 'proxyTlsContext' + description: | + [Deprecated] The TLS settings for the client or server. The TLS settings for the client or server. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'certificateContext' + description: | + [Deprecated] Defines the mechanism to obtain the client or server certificate. Defines the mechanism to obtain the client or server certificate. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'certificateSource' + description: | + Defines how TLS certificates are obtained. + values: + - :INVALID + - :USE_PATH + - :USE_SDS + - !ruby/object:Api::Type::NestedObject + name: 'certificatePaths' + description: | + [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. + properties: + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the client or server TLS certificate to use. + - !ruby/object:Api::Type::String + name: 'privateKeyPath' + description: | + The path to the file holding the client or server private key. + - !ruby/object:Api::Type::NestedObject + name: 'sdsConfig' + description: | + [Deprecated] The configuration to access the SDS server. The configuration to access the SDS server. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'grpcServiceConfig' + description: | + [Deprecated] gRPC config to access the SDS server. gRPC config to access the SDS server. + properties: + - !ruby/object:Api::Type::String + name: 'targetUri' + description: | + The target URI of the SDS server. + - !ruby/object:Api::Type::NestedObject + name: 'channelCredentials' + description: | + [Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'channelCredentialType' + description: | + The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. + values: + - :CERTIFICATES + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'certificates' + description: | + [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. 
+ properties: + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the client or server TLS certificate to use. + - !ruby/object:Api::Type::String + name: 'privateKeyPath' + description: | + The path to the file holding the client or server private key. + - !ruby/object:Api::Type::NestedObject + name: 'callCredentials' + description: | + [Deprecated] gRPC call credentials to access the SDS server. gRPC call credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'callCredentialType' + description: | + The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. + values: + - :FROM_PLUGIN + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'fromPlugin' + description: | + [Deprecated] Custom authenticator credentials. Custom authenticator credentials. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Plugin name. + - !ruby/object:Api::Type::String + name: 'structConfig' + description: | + A text proto that conforms to a Struct type definition interpreted by the plugin. + - !ruby/object:Api::Type::NestedObject + name: 'validationContext' + description: | + [Deprecated] Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. validate the client/server certificate. + properties: + - !ruby/object:Api::Type::Enum + name: 'validationSource' + description: | + Defines how TLS certificates are obtained. + values: + - :INVALID + - :USE_PATH + - :USE_SDS + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the CA certificate to validate the client or server certificate. 
+ - !ruby/object:Api::Type::NestedObject + name: 'sdsConfig' + description: | + [Deprecated] The configuration to access the SDS server. The configuration to access the SDS server. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'grpcServiceConfig' + description: | + [Deprecated] gRPC config to access the SDS server. gRPC config to access the SDS server. + properties: + - !ruby/object:Api::Type::String + name: 'targetUri' + description: | + The target URI of the SDS server. + - !ruby/object:Api::Type::NestedObject + name: 'channelCredentials' + description: | + [Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'channelCredentialType' + description: | + The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. + values: + - :CERTIFICATES + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'certificates' + description: | + [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. + properties: + - !ruby/object:Api::Type::String + name: 'certificatePath' + description: | + The path to the file holding the client or server TLS certificate to use. + - !ruby/object:Api::Type::String + name: 'privateKeyPath' + description: | + The path to the file holding the client or server private key. + - !ruby/object:Api::Type::NestedObject + name: 'callCredentials' + description: | + [Deprecated] gRPC call credentials to access the SDS server. gRPC call credentials to access the SDS server. + properties: + - !ruby/object:Api::Type::Enum + name: 'callCredentialType' + description: | + The type of call credentials to use for GRPC requests to the SDS server. 
This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. + values: + - :FROM_PLUGIN + - :GCE_VM + - :INVALID + - !ruby/object:Api::Type::NestedObject + name: 'fromPlugin' + description: | + [Deprecated] Custom authenticator credentials. Custom authenticator credentials. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Plugin name. + - !ruby/object:Api::Type::String + name: 'structConfig' + description: | + A text proto that conforms to a Struct type definition interpreted by the plugin. + - !ruby/object:Api::Type::Array + name: 'subjectAltNames' + description: | + A list of alternate names to verify the subject identity in the certificate presented by the client. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies. + + + + + - !ruby/object:Api::Resource + name: RegionAutoscaler + base_url: 'projects/{{project}}/regions/{{region}}/autoscalers' + self_link: 'projects/{{project}}/regions/{{region}}/autoscalers/{{autoscaler}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents an Autoscaler resource. 
Google Compute Engine has two Autoscaler resources: * [Zonal](/compute/docs/reference/rest/alpha/autoscalers) * [Regional](/compute/docs/reference/rest/alpha/regionAutoscalers) Use autoscalers to automatically add or delete instances from a managed instance group according to your defined autoscaling policy. For more information, read Autoscaling Groups of Instances. For zonal managed instance groups resource, use the autoscaler resource. For regional managed instance groups, use the regionAutoscalers resource. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#autoscaler for autoscalers. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'target' + description: | + URL of the managed instance group that this autoscaler will scale. This field is required when creating an autoscaler. + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingPolicy' + description: | + Cloud Autoscaler policy. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'minNumReplicas' + description: | + The minimum number of replicas that the autoscaler can scale in to. This cannot be less than 0. If not provided, autoscaler chooses a default value depending on maximum number of instances allowed. + - !ruby/object:Api::Type::Integer + name: 'maxNumReplicas' + description: | + The maximum number of instances that the autoscaler can scale out to. This is required when creating or updating an autoscaler. The maximum number of replicas must not be lower than minimal number of replicas. + - !ruby/object:Api::Type::NestedObject + name: 'scaleDownControl' + description: | + Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'maxScaledDownReplicas' + description: | + Encapsulates numeric value that can be either absolute or relative. + properties: + - !ruby/object:Api::Type::Integer + name: 'fixed' + description: | + Specifies a fixed number of VM instances. This must be a positive integer. + - !ruby/object:Api::Type::Integer + name: 'percent' + description: | + Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. + - !ruby/object:Api::Type::Integer + name: 'calculated' + description: | + [Output Only] Absolute value of VM instances calculated based on the specific mode. - If the value is fixed, then the calculated value is equal to the fixed value. - If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded. 
+ - !ruby/object:Api::Type::Integer + name: 'timeWindowSec' + description: | + How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. + - !ruby/object:Api::Type::NestedObject + name: 'scaleInControl' + description: | + Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'maxScaledInReplicas' + description: | + Encapsulates numeric value that can be either absolute or relative. + properties: + - !ruby/object:Api::Type::Integer + name: 'fixed' + description: | + Specifies a fixed number of VM instances. This must be a positive integer. + - !ruby/object:Api::Type::Integer + name: 'percent' + description: | + Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. + - !ruby/object:Api::Type::Integer + name: 'calculated' + description: | + [Output Only] Absolute value of VM instances calculated based on the specific mode. - If the value is fixed, then the calculated value is equal to the fixed value. - If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded. + - !ruby/object:Api::Type::Integer + name: 'timeWindowSec' + description: | + How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. + - !ruby/object:Api::Type::Integer + name: 'coolDownPeriodSec' + description: | + The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). 
Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. To do this, create a VM and time your application's startup process. + - !ruby/object:Api::Type::NestedObject + name: 'cpuUtilization' + description: | + CPU utilization policy. + properties: + - !ruby/object:Api::Type::Integer + name: 'utilizationTarget' + description: | + The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization. + - !ruby/object:Api::Type::Enum + name: 'predictiveMethod' + description: | + Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. 
+ values: + - :NONE + - :OPTIMIZE_AVAILABILITY + - :PREDICTIVE_METHOD_UNSPECIFIED + - :STANDARD + - !ruby/object:Api::Type::Array + name: 'customMetricUtilizations' + description: | + Configuration parameters of autoscaling based on a custom metric. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metric' + description: | + The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE. + - !ruby/object:Api::Type::String + name: 'filter' + description: | + A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. For the filter to be valid for autoscaling purposes, the following rules apply: - You can only use the AND operator for joining selectors. - You can only use direct equality comparison operator (=) without any functions for each selector. - You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. - The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a *per-group metric* for the purpose of autoscaling. If not specified, the type defaults to gce_instance. 
Try to provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. + - !ruby/object:Api::Type::Integer + name: 'utilizationTarget' + description: | + The target value of the metric that autoscaler maintains. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler works to keep this value constant for each of the instances. + - !ruby/object:Api::Type::Integer + name: 'singleInstanceAssignment' + description: | + If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler keeps the number of instances proportional to the value of this metric. The metric itself does not change value due to group resizing. A good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead. + - !ruby/object:Api::Type::Enum + name: 'utilizationTargetType' + description: | + Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. 
+ values: + - :DELTA_PER_MINUTE + - :DELTA_PER_SECOND + - :GAUGE + - !ruby/object:Api::Type::NestedObject + name: 'loadBalancingUtilization' + description: | + Configuration parameters of autoscaling based on load balancing. + properties: + - !ruby/object:Api::Type::Integer + name: 'utilizationTarget' + description: | + Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that the autoscaler maintains. Must be a positive float value. If not defined, the default is 0.8. + - !ruby/object:Api::Type::Enum + name: 'mode' + description: | + Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see "Turning off or restricting an autoscaler" + values: + - :OFF + - :ON + - :ONLY_SCALE_OUT + - :ONLY_UP + - !ruby/object:Api::Type::NestedObject + name: 'scalingSchedules' + description: | + Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. Up to 128 scaling schedules are allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Scaling based on user-defined schedule. The message describes a single scaling schedule. A scaling schedule changes the minimum number of VM instances an autoscaler can recommend, which can trigger scaling out. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + [Output Only] URL of the zone where the instance group resides (for autoscalers living in zonal scope). + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope). 
+ - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of the autoscaler configuration. Current set of possible values: - PENDING: Autoscaler backend hasn't read new/updated configuration. - DELETING: Configuration is being deleted. - ACTIVE: Configuration is acknowledged to be effective. Some warnings might be present in the statusDetails field. - ERROR: Configuration has errors. Actionable for users. Details are present in the statusDetails field. New values might be added in the future. + values: + - :ACTIVE + - :DELETING + - :ERROR + - :PENDING + - !ruby/object:Api::Type::Array + name: 'statusDetails' + description: | + [Output Only] Human-readable details about the current state of the autoscaler. Read the documentation for Commonly returned status messages for examples of status messages you might encounter. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'message' + description: | + The status message. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of error, warning, or notice returned. Current set of possible values: - ALL_INSTANCES_UNHEALTHY (WARNING): All instances in the instance group are unhealthy (not in RUNNING state). - BACKEND_SERVICE_DOES_NOT_EXIST (ERROR): There is no backend service attached to the instance group. - CAPPED_AT_MAX_NUM_REPLICAS (WARNING): Autoscaler recommends a size greater than maxNumReplicas. - CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE (WARNING): The custom metric samples are not exported often enough to be a credible base for autoscaling. 
- CUSTOM_METRIC_INVALID (ERROR): The custom metric that was specified does not exist or does not have the necessary labels. - MIN_EQUALS_MAX (WARNING): The minNumReplicas is equal to maxNumReplicas. This means the autoscaler cannot add or remove instances from the instance group. - MISSING_CUSTOM_METRIC_DATA_POINTS (WARNING): The autoscaler did not receive any data from the custom metric configured for autoscaling. - MISSING_LOAD_BALANCING_DATA_POINTS (WARNING): The autoscaler is configured to scale based on a load balancing signal but the instance group has not received any requests from the load balancer. - MODE_OFF (WARNING): Autoscaling is turned off. The number of instances in the group won't change automatically. The autoscaling configuration is preserved. - MODE_ONLY_UP (WARNING): Autoscaling is in the "Autoscale only out" mode. The autoscaler can add instances but not remove any. - MORE_THAN_ONE_BACKEND_SERVICE (ERROR): The instance group cannot be autoscaled because it has more than one backend service attached to it. - NOT_ENOUGH_QUOTA_AVAILABLE (ERROR): There is insufficient quota for the necessary resources, such as CPU or number of instances. - REGION_RESOURCE_STOCKOUT (ERROR): Shown only for regional autoscalers: there is a resource stockout in the chosen region. - SCALING_TARGET_DOES_NOT_EXIST (ERROR): The target to be scaled does not exist. - UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION (ERROR): Autoscaling does not work with an HTTP/S load balancer that has been configured for maxRate. - ZONE_RESOURCE_STOCKOUT (ERROR): For zonal autoscalers: there is a resource stockout in the chosen zone. For regional autoscalers: in at least one of the zones you're using there is a resource stockout. New values might be added in the future. Some of the values might not be available in all API versions. 
+ values: + - :ALL_INSTANCES_UNHEALTHY + - :BACKEND_SERVICE_DOES_NOT_EXIST + - :CAPPED_AT_MAX_NUM_REPLICAS + - :CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE + - :CUSTOM_METRIC_INVALID + - :MIN_EQUALS_MAX + - :MISSING_CUSTOM_METRIC_DATA_POINTS + - :MISSING_LOAD_BALANCING_DATA_POINTS + - :MODE_OFF + - :MODE_ONLY_SCALE_OUT + - :MODE_ONLY_UP + - :MORE_THAN_ONE_BACKEND_SERVICE + - :NOT_ENOUGH_QUOTA_AVAILABLE + - :REGION_RESOURCE_STOCKOUT + - :SCALING_TARGET_DOES_NOT_EXIST + - :SCHEDULED_INSTANCES_GREATER_THAN_AUTOSCALER_MAX + - :SCHEDULED_INSTANCES_LESS_THAN_AUTOSCALER_MIN + - :UNKNOWN + - :UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION + - :ZONE_RESOURCE_STOCKOUT + - !ruby/object:Api::Type::Integer + name: 'recommendedSize' + description: | + [Output Only] Target recommended MIG size (number of instances) computed by autoscaler. Autoscaler calculates the recommended MIG size even when the autoscaling policy mode is different from ON. This field is empty when autoscaler is not connected to an existing managed instance group or autoscaler did not generate its prediction. + - !ruby/object:Api::Type::NestedObject + name: 'scalingScheduleStatus' + description: | + [Output Only] Status information of existing scaling schedules. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + + - !ruby/object:Api::Resource + name: TargetGrpcProxy + base_url: 'projects/{{project}}/global/targetGrpcProxies' + self_link: 'projects/{{project}}/global/targetGrpcProxies/{{targetGrpcProxy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Target gRPC Proxy resource. A target gRPC proxy is a component of load balancers intended for load balancing gRPC traffic. Only global forwarding rules with load balancing scheme INTERNAL_SELF_MANAGED can reference a target gRPC proxy. The target gRPC Proxy references a URL map that specifies how traffic is routed to gRPC backend services. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#targetGrpcProxy for target grpc proxies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource type. The server generates this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL with id for the resource. + - !ruby/object:Api::Type::String + name: 'urlMap' + description: | + URL to the UrlMap resource that defines the mapping from URL to the BackendService. The protocol field in the BackendService must be set to GRPC. + - !ruby/object:Api::Type::Boolean + name: 'validateForProxyless' + description: | + If true, indicates that the BackendServices referenced by the urlMap may be accessed by gRPC applications without using a sidecar proxy. This will enable configuration checks on urlMap and its referenced BackendServices to not allow unsupported features. A gRPC application must use "xds:///" scheme in the target URI of the service it is connecting to. If false, indicates that the BackendServices referenced by the urlMap will be accessed by gRPC applications via a sidecar proxy. In this case, a gRPC application must not use "xds:///" scheme in the target URI of the service it is connecting to + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a TargetGrpcProxy. 
An up-to-date fingerprint must be provided in order to patch/update the TargetGrpcProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetGrpcProxy. + + + + + - !ruby/object:Api::Resource + name: TargetGrpcProxy + base_url: 'projects/{{project}}/global/targetGrpcProxies' + self_link: 'projects/{{project}}/global/targetGrpcProxies/{{targetGrpcProxy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Target gRPC Proxy resource. A target gRPC proxy is a component of load balancers intended for load balancing gRPC traffic. Only global forwarding rules with load balancing scheme INTERNAL_SELF_MANAGED can reference a target gRPC proxy. The target gRPC Proxy references a URL map that specifies how traffic is routed to gRPC backend services. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#targetGrpcProxy for target grpc proxies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource type. The server generates this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. 
Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL with id for the resource. + - !ruby/object:Api::Type::String + name: 'urlMap' + description: | + URL to the UrlMap resource that defines the mapping from URL to the BackendService. The protocol field in the BackendService must be set to GRPC. + - !ruby/object:Api::Type::Boolean + name: 'validateForProxyless' + description: | + If true, indicates that the BackendServices referenced by the urlMap may be accessed by gRPC applications without using a sidecar proxy. This will enable configuration checks on urlMap and its referenced BackendServices to not allow unsupported features. A gRPC application must use "xds:///" scheme in the target URI of the service it is connecting to. If false, indicates that the BackendServices referenced by the urlMap will be accessed by gRPC applications via a sidecar proxy. In this case, a gRPC application must not use "xds:///" scheme in the target URI of the service it is connecting to + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. 
This field will be ignored when inserting a TargetGrpcProxy. An up-to-date fingerprint must be provided in order to patch/update the TargetGrpcProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetGrpcProxy. + + + + + - !ruby/object:Api::Resource + name: ResourcePolicy + base_url: 'projects/{{project}}/regions/{{region}}/resourcePolicies' + self_link: 'projects/{{project}}/regions/{{region}}/resourcePolicies/{{resourcePolicy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Resource Policy resource. You can use resource policies to schedule actions for some Compute Engine resources. For example, you can use them to schedule persistent disk snapshots. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#resource_policies for resource policies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined fully-qualified URL for this resource. 
+ - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'region' + description: | + + - !ruby/object:Api::Type::String + name: 'description' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::NestedObject + name: 'vmMaintenancePolicy' + description: | + + properties: + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + A maintenance window for VMs. When set, we restrict our maintenance operations to this window. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dailyMaintenanceWindow' + description: | + Time window specified for daily operations. + properties: + - !ruby/object:Api::Type::Integer + name: 'daysInCycle' + description: | + Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] A predetermined duration for the window, automatically chosen to be the smallest possible in the given scenario. 
+ - !ruby/object:Api::Type::NestedObject + name: 'concurrencyControlGroup' + description: | + A concurrency control configuration. Defines a group config that, when attached to an instance, recognizes that instance as part of a group of instances where only up to the concurrency_limit of instances in that group can undergo simultaneous maintenance. For more information: go/concurrency-control-design-doc + properties: + - !ruby/object:Api::Type::Integer + name: 'concurrencyLimit' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'snapshotSchedulePolicy' + description: | + A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'schedule' + description: | + A schedule for disks where the scheduled operations are performed. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'hourlySchedule' + description: | + Time window specified for hourly operations. + properties: + - !ruby/object:Api::Type::Integer + name: 'hoursInCycle' + description: | + Defines a schedule with units measured in hours. The value determines how many hours pass between the start of each cycle. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario. + - !ruby/object:Api::Type::NestedObject + name: 'dailySchedule' + description: | + Time window specified for daily operations. + properties: + - !ruby/object:Api::Type::Integer + name: 'daysInCycle' + description: | + Defines a schedule with units measured in days.
The value determines how many days pass between the start of each cycle. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] A predetermined duration for the window, automatically chosen to be the smallest possible in the given scenario. + - !ruby/object:Api::Type::NestedObject + name: 'weeklySchedule' + description: | + Time window specified for weekly operations. + properties: + - !ruby/object:Api::Type::Array + name: 'dayOfWeeks' + description: | + Up to 7 intervals/windows, one for each day of the week. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'day' + description: | + Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. + values: + - :FRIDAY + - :INVALID + - :MONDAY + - :SATURDAY + - :SUNDAY + - :THURSDAY + - :TUESDAY + - :WEDNESDAY + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario. + - !ruby/object:Api::Type::NestedObject + name: 'retentionPolicy' + description: | + Policy for retention of scheduled snapshots. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxRetentionDays' + description: | + Maximum age of the snapshot that is allowed to be kept. 
+ - !ruby/object:Api::Type::Enum + name: 'onPolicySwitch' + description: | + + values: + - :DO_NOT_RETROACTIVELY_APPLY + - :RETROACTIVELY_APPLY + - :UNSPECIFIED_ON_POLICY_SWITCH + - !ruby/object:Api::Type::Enum + name: 'onSourceDiskDelete' + description: | + Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. + values: + - :APPLY_RETENTION_POLICY + - :KEEP_AUTO_SNAPSHOTS + - :UNSPECIFIED_ON_SOURCE_DISK_DELETE + - !ruby/object:Api::Type::NestedObject + name: 'snapshotProperties' + description: | + Specified snapshot properties for scheduled snapshots created by this policy. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'storageLocations' + description: | + Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'guestFlush' + description: | + Indication to perform a 'guest aware' snapshot. + - !ruby/object:Api::Type::String + name: 'chainName' + description: | + Chain name that the snapshot is created in. + - !ruby/object:Api::Type::NestedObject + name: 'groupPlacementPolicy' + description: | + A GroupPlacementPolicy specifies resource placement configuration. 
It specifies the failure bucket separation as well as network locality + properties: + - !ruby/object:Api::Type::Enum + name: 'style' + description: | + Specifies instances to hosts placement relationship + values: + - :COMPACT + - :FULLY_SPREAD + - :UNSPECIFIED_PLACEMENT_TYPE + - !ruby/object:Api::Type::Enum + name: 'locality' + description: | + Specifies network locality + values: + - :BEST_EFFORT + - :STRICT + - :UNSPECIFIED_LOCALITY + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Number of VMs in this placement group. Google does not recommend that you use this field unless you use a compact policy and you want your policy to work only if it contains this exact number of VMs. + - !ruby/object:Api::Type::Integer + name: 'availabilityDomainCount' + description: | + The number of availability domains to spread instances across. If two instances are in different availability domain, they are not in the same low latency network. + - !ruby/object:Api::Type::Enum + name: 'collocation' + description: | + Specifies network collocation + values: + - :CLUSTERED + - :COLLOCATED + - :UNSPECIFIED_COLLOCATION + - !ruby/object:Api::Type::Enum + name: 'scope' + description: | + Scope specifies the availability domain to which the VMs should be spread. + values: + - :HOST + - :UNSPECIFIED_SCOPE + - !ruby/object:Api::Type::String + name: 'tpuTopology' + description: | + Specifies the shape of the TPU slice + - !ruby/object:Api::Type::Integer + name: 'maxDistance' + description: | + Specifies the number of max logical switches. + - !ruby/object:Api::Type::Integer + name: 'sliceCount' + description: | + Specifies the number of slices in a multislice workload. + - !ruby/object:Api::Type::NestedObject + name: 'instanceSchedulePolicy' + description: | + An InstanceSchedulePolicy specifies when and how frequent certain operations are performed on the instance. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'vmStartSchedule' + description: | + Schedule for an instance operation. + properties: + - !ruby/object:Api::Type::String + name: 'schedule' + description: | + Specifies the frequency for the operation, using the unix-cron format. + - !ruby/object:Api::Type::NestedObject + name: 'vmStopSchedule' + description: | + Schedule for an instance operation. + properties: + - !ruby/object:Api::Type::String + name: 'schedule' + description: | + Specifies the frequency for the operation, using the unix-cron format. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: https://wikipedia.org/wiki/Tz_database. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + The start time of the schedule. The timestamp is an RFC3339 string. + - !ruby/object:Api::Type::String + name: 'expirationTime' + description: | + The expiration time of the schedule. The timestamp is an RFC3339 string. + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of resource policy creation. + values: + - :CREATING + - :DELETING + - :EXPIRED + - :INVALID + - :READY + - !ruby/object:Api::Type::NestedObject + name: 'resourceStatus' + description: | + Contains output only fields. Use this sub-message for all output fields set on ResourcePolicy. The internal structure of this "status" field should mimic the structure of ResourcePolicy proto specification. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'instanceSchedulePolicy' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'nextRunStartTime' + description: | + [Output Only] The next time the schedule is planned to run. The actual time might be slightly different. The timestamp is an RFC3339 string. 
+ - !ruby/object:Api::Type::String + name: 'lastRunStartTime' + description: | + [Output Only] The last time the schedule successfully ran. The timestamp is an RFC3339 string. + + + + + - !ruby/object:Api::Resource + name: ResourcePolicy + base_url: 'projects/{{project}}/regions/{{region}}/resourcePolicies' + self_link: 'projects/{{project}}/regions/{{region}}/resourcePolicies/{{resourcePolicy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Resource Policy resource. You can use resource policies to schedule actions for some Compute Engine resources. For example, you can use them to schedule persistent disk snapshots. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#resource_policies for resource policies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined fully-qualified URL for this resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. 
+ - !ruby/object:Api::Type::String + name: 'region' + description: | + + - !ruby/object:Api::Type::String + name: 'description' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::NestedObject + name: 'vmMaintenancePolicy' + description: | + + properties: + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + A maintenance window for VMs. When set, we restrict our maintenance operations to this window. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dailyMaintenanceWindow' + description: | + Time window specified for daily operations. + properties: + - !ruby/object:Api::Type::Integer + name: 'daysInCycle' + description: | + Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] A predetermined duration for the window, automatically chosen to be the smallest possible in the given scenario. + - !ruby/object:Api::Type::NestedObject + name: 'concurrencyControlGroup' + description: | + A concurrency control configuration. 
+ Defines a group config that, when attached to an instance, recognizes that instance as part of a group of instances where only up to the concurrency_limit of instances in that group can undergo simultaneous maintenance. For more information: go/concurrency-control-design-doc + properties: + - !ruby/object:Api::Type::Integer + name: 'concurrencyLimit' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'snapshotSchedulePolicy' + description: | + A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'schedule' + description: | + A schedule for disks where the scheduled operations are performed. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'hourlySchedule' + description: | + Time window specified for hourly operations. + properties: + - !ruby/object:Api::Type::Integer + name: 'hoursInCycle' + description: | + Defines a schedule with units measured in hours. The value determines how many hours pass between the start of each cycle. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario. + - !ruby/object:Api::Type::NestedObject + name: 'dailySchedule' + description: | + Time window specified for daily operations. + properties: + - !ruby/object:Api::Type::Integer + name: 'daysInCycle' + description: | + Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Start time of the window. 
This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] A predetermined duration for the window, automatically chosen to be the smallest possible in the given scenario. + - !ruby/object:Api::Type::NestedObject + name: 'weeklySchedule' + description: | + Time window specified for weekly operations. + properties: + - !ruby/object:Api::Type::Array + name: 'dayOfWeeks' + description: | + Up to 7 intervals/windows, one for each day of the week. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'day' + description: | + Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. + values: + - :FRIDAY + - :INVALID + - :MONDAY + - :SATURDAY + - :SUNDAY + - :THURSDAY + - :TUESDAY + - :WEDNESDAY + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + - !ruby/object:Api::Type::String + name: 'duration' + description: | + [Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario. + - !ruby/object:Api::Type::NestedObject + name: 'retentionPolicy' + description: | + Policy for retention of scheduled snapshots. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxRetentionDays' + description: | + Maximum age of the snapshot that is allowed to be kept. 
+ - !ruby/object:Api::Type::Enum + name: 'onPolicySwitch' + description: | + + values: + - :DO_NOT_RETROACTIVELY_APPLY + - :RETROACTIVELY_APPLY + - :UNSPECIFIED_ON_POLICY_SWITCH + - !ruby/object:Api::Type::Enum + name: 'onSourceDiskDelete' + description: | + Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. + values: + - :APPLY_RETENTION_POLICY + - :KEEP_AUTO_SNAPSHOTS + - :UNSPECIFIED_ON_SOURCE_DISK_DELETE + - !ruby/object:Api::Type::NestedObject + name: 'snapshotProperties' + description: | + Specified snapshot properties for scheduled snapshots created by this policy. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'storageLocations' + description: | + Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'guestFlush' + description: | + Indication to perform a 'guest aware' snapshot. + - !ruby/object:Api::Type::String + name: 'chainName' + description: | + Chain name that the snapshot is created in. + - !ruby/object:Api::Type::NestedObject + name: 'groupPlacementPolicy' + description: | + A GroupPlacementPolicy specifies resource placement configuration. 
It specifies the failure bucket separation as well as network locality + properties: + - !ruby/object:Api::Type::Enum + name: 'style' + description: | + Specifies instances to hosts placement relationship + values: + - :COMPACT + - :FULLY_SPREAD + - :UNSPECIFIED_PLACEMENT_TYPE + - !ruby/object:Api::Type::Enum + name: 'locality' + description: | + Specifies network locality + values: + - :BEST_EFFORT + - :STRICT + - :UNSPECIFIED_LOCALITY + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Number of VMs in this placement group. Google does not recommend that you use this field unless you use a compact policy and you want your policy to work only if it contains this exact number of VMs. + - !ruby/object:Api::Type::Integer + name: 'availabilityDomainCount' + description: | + The number of availability domains to spread instances across. If two instances are in different availability domain, they are not in the same low latency network. + - !ruby/object:Api::Type::Enum + name: 'collocation' + description: | + Specifies network collocation + values: + - :CLUSTERED + - :COLLOCATED + - :UNSPECIFIED_COLLOCATION + - !ruby/object:Api::Type::Enum + name: 'scope' + description: | + Scope specifies the availability domain to which the VMs should be spread. + values: + - :HOST + - :UNSPECIFIED_SCOPE + - !ruby/object:Api::Type::String + name: 'tpuTopology' + description: | + Specifies the shape of the TPU slice + - !ruby/object:Api::Type::Integer + name: 'maxDistance' + description: | + Specifies the number of max logical switches. + - !ruby/object:Api::Type::Integer + name: 'sliceCount' + description: | + Specifies the number of slices in a multislice workload. + - !ruby/object:Api::Type::NestedObject + name: 'instanceSchedulePolicy' + description: | + An InstanceSchedulePolicy specifies when and how frequent certain operations are performed on the instance. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'vmStartSchedule' + description: | + Schedule for an instance operation. + properties: + - !ruby/object:Api::Type::String + name: 'schedule' + description: | + Specifies the frequency for the operation, using the unix-cron format. + - !ruby/object:Api::Type::NestedObject + name: 'vmStopSchedule' + description: | + Schedule for an instance operation. + properties: + - !ruby/object:Api::Type::String + name: 'schedule' + description: | + Specifies the frequency for the operation, using the unix-cron format. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: https://wikipedia.org/wiki/Tz_database. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + The start time of the schedule. The timestamp is an RFC3339 string. + - !ruby/object:Api::Type::String + name: 'expirationTime' + description: | + The expiration time of the schedule. The timestamp is an RFC3339 string. + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of resource policy creation. + values: + - :CREATING + - :DELETING + - :EXPIRED + - :INVALID + - :READY + - !ruby/object:Api::Type::NestedObject + name: 'resourceStatus' + description: | + Contains output only fields. Use this sub-message for all output fields set on ResourcePolicy. The internal structure of this "status" field should mimic the structure of ResourcePolicy proto specification. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'instanceSchedulePolicy' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'nextRunStartTime' + description: | + [Output Only] The next time the schedule is planned to run. The actual time might be slightly different. The timestamp is an RFC3339 string. 
+ - !ruby/object:Api::Type::String + name: 'lastRunStartTime' + description: | + [Output Only] The last time the schedule successfully ran. The timestamp is an RFC3339 string. + + + + + - !ruby/object:Api::Resource + name: TargetSslProxy + base_url: 'projects/{{project}}/global/targetSslProxies' + self_link: 'projects/{{project}}/global/targetSslProxies/{{targetSslProxy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'service' + description: | + URL to the BackendService resource. + - !ruby/object:Api::Type::Array + name: 'sslCertificates' + description: | + URLs to SslCertificate resources that are used to authenticate connections to Backends. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'certificateMap' + description: | + URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}. + - !ruby/object:Api::Type::Enum + name: 'proxyHeader' + description: | + Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. + values: + - :NONE + - :PROXY_V1 + - !ruby/object:Api::Type::String + name: 'sslPolicy' + description: | + URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured. 
+ + + + + - !ruby/object:Api::Resource + name: TargetSslProxy + base_url: 'projects/{{project}}/global/targetSslProxies' + self_link: 'projects/{{project}}/global/targetSslProxies/{{targetSslProxy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'service' + description: | + URL to the BackendService resource. + - !ruby/object:Api::Type::Array + name: 'sslCertificates' + description: | + URLs to SslCertificate resources that are used to authenticate connections to Backends. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'certificateMap' + description: | + URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}. + - !ruby/object:Api::Type::Enum + name: 'proxyHeader' + description: | + Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. + values: + - :NONE + - :PROXY_V1 + - !ruby/object:Api::Type::String + name: 'sslPolicy' + description: | + URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured. 
+ + + + - !ruby/object:Api::Resource + name: 'InterconnectAttachment' + kind: 'compute#interconnectattachments' + base_url: 'projects/{{project}}/regions/{{region}}/interconnectAttachments/{{interconnectAttachment}}' + collection_url_key: 'items' + input: true + has_self_link: true + description: | + Represents an Interconnect Attachment (VLAN attachment) resource. You can use Interconnect attachments (VLANs) to connect your Virtual Private Cloud networks to your on-premises networks through an Interconnect. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + Type of the resource. Always compute#interconnectAttachment for interconnect attachments. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'id' + description: | + string (uint64 format) The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'interconnect' + description: | + URL of the underlying Interconnect object that this attachment's traffic will traverse through. + - !ruby/object:Api::Type::String + name: 'router' + description: | + URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. 
The InterconnectAttachment will automatically connect the Interconnect to the network & region within which the Cloud Router is configured. + - !ruby/object:Api::Type::String + name: 'region' + description: | + URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::String + name: 'googleReferenceId' + description: | + + - !ruby/object:Api::Type::Integer + name: 'mtu' + description: | + integer Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect attachment. Only 1440 and 1500 are allowed. If not specified, the value will default to 1440. + - !ruby/object:Api::Type::NestedObject + name: 'privateInterconnectInfo' + description: | + object Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED. + properties: + - !ruby/object:Api::Type::Integer + name: 'tag8021q' + description: | + integer 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region. + - !ruby/object:Api::Type::Enum + name: 'operationalStatus' + description: | + The current status of whether or not this interconnect attachment is functional, which can take one of the following values: OS_ACTIVE: The attachment has been turned up and is ready to use. OS_UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. + values: + - :OS_ACTIVE + - :OS_UNPROVISIONED + + default_value: :OS_ACTIVE + - !ruby/object:Api::Type::String + name: 'cloudRouterIpAddress' + description: | + IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment. 
+ - !ruby/object:Api::Type::String + name: 'customerRouterIpAddress' + description: | + IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of interconnect attachment this is, which can take one of the following values: DEDICATED: an attachment to a Dedicated Interconnect. PARTNER: an attachment to a Partner Interconnect, created by the customer. PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. + values: + - :DEDICATED + - :PARTNER + - :PARTNER_PROVIDER + + default_value: :DEDICATED + - !ruby/object:Api::Type::String + name: 'pairingKey' + description: | + [Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of a PARTNER attachment used to initiate provisioning with a selected partner. Of the form "XXXXX/region/domain" + - !ruby/object:Api::Type::Boolean + name: 'adminEnabled' + description: | + boolean Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER. + - !ruby/object:Api::Type::Integer + name: 'vlanTag8021q' + description: | + integer The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4093. Only specified at creation time. + - !ruby/object:Api::Type::Enum + name: 'edgeAvailabilityDomain' + description: | + Desired availability domain for the attachment. Only available for type PARTNER, at creation time, and can take one of the following values: AVAILABILITY_DOMAIN_ANY AVAILABILITY_DOMAIN_1 AVAILABILITY_DOMAIN_2 For improved reliability, customers should configure a pair of attachments, one per availability domain. The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. 
+ values: + - :PARTNER + - :AVAILABILITY_DOMAIN_ANY + - :AVAILABILITY_DOMAIN_1 + - :AVAILABILITY_DOMAIN_2 + - :AVAILABILITY_DOMAIN_ANY + + default_value: :PARTNER + - !ruby/object:Api::Type::Array + name: 'candidateSubnets' + description: | + Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google's edge. If not supplied, Google will randomly select an unused /29 from all of link-local space. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'bandwidth' + description: | + Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one of the following values: BPS_50M: 50 Mbit/s BPS_100M: 100 Mbit/s BPS_200M: 200 Mbit/s BPS_300M: 300 Mbit/s BPS_400M: 400 Mbit/s BPS_500M: 500 Mbit/s BPS_1G: 1 Gbit/s BPS_2G: 2 Gbit/s BPS_5G: 5 Gbit/s BPS_10G: 10 Gbit/s BPS_20G: 20 Gbit/s BPS_50G: 50 Gbit/s + values: + - :DEDICATED + - :PARTNER + - :PARTNER + - :PARTNER_PROVIDER + - :DEDICATED + - :BPS_50M + - :50 + - :BPS_100M + - :100 + - :BPS_200M + - :200 + - :BPS_300M + - :300 + - :BPS_400M + - :400 + - :BPS_500M + - :500 + - :BPS_1G + - :1 + - :BPS_2G + - :2 + - :BPS_5G + - :5 + - :BPS_10G + - :10 + - :BPS_20G + - :20 + - :BPS_50G + - :50 + + default_value: :DEDICATED + - !ruby/object:Api::Type::NestedObject + name: 'partnerMetadata' + description: | + object Informational metadata about Partner attachments from Partners to display to customers. 
Output only for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED. + properties: + - !ruby/object:Api::Type::String + name: 'partnerName' + description: | + Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values. + - !ruby/object:Api::Type::String + name: 'interconnectName' + description: | + Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance "Chicago 1". This value may be validated to match approved Partner values. + - !ruby/object:Api::Type::String + name: 'portalUrl' + description: | + URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + map (key: string, value: string) Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'string' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + string (bytes format) A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment. A base64-encoded string. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The current state of this attachment's functionality. 
Enum values ACTIVE and UNPROVISIONED are shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect attachments, while enum values PENDING_PARTNER, PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only PARTNER and PARTNER_PROVIDER interconnect attachments. This state can take one of the following values: ACTIVE: The attachment has been turned up and is ready to use. UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. PENDING_PARTNER: A newly-created PARTNER attachment that has not yet been configured on the Partner side. PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it. PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is waiting for a customer to activate it. DEFUNCT: The attachment was deleted externally and is no longer functional. This could be because the associated Interconnect was removed, or because the other side of a Partner attachment was deleted.
+        values:
+          - :ACTIVE
+          - :UNPROVISIONED
+          - :PENDING_PARTNER
+          - :PARTNER_REQUEST_RECEIVED
+          - :PENDING_CUSTOMER
+          - :DEFUNCT
+        default_value: :ACTIVE
+      - !ruby/object:Api::Type::String
+        name: 'partnerAsn'
+        description: |
+          string (int64 format) Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.
+      - !ruby/object:Api::Type::Enum
+        name: 'encryption'
+        description: |
+          Indicates the user-supplied encryption option of this VLAN attachment (interconnectAttachment).
Can only be specified at attachment creation for PARTNER or DEDICATED attachments. Possible values are: NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, such a VLAN attachment. To use HA VPN over Cloud Interconnect, the VLAN attachment must be created with this option.
+        values:
+          - :NONE
+          - :IPSEC
+        default_value: :NONE
+      - !ruby/object:Api::Type::Array
+        name: 'ipsecInternalAddresses'
+        description: |
+          A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.
+        item_type: Api::Type::String
+      - !ruby/object:Api::Type::Integer
+        name: 'dataplaneVersion'
+        description: |
+          integer Dataplane version for this InterconnectAttachment.
This field is only present for Dataplane version 2 and higher. Absence of this field in the API output indicates that the Dataplane is version 1. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + boolean Reserved for future use. + - !ruby/object:Api::Type::Enum + name: 'stackType' + description: | + The stack type for this interconnect attachment to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at interconnect attachments creation and update interconnect attachment operations. + values: + - :IPV4_ONLY + + default_value: :IPV4_ONLY + - !ruby/object:Api::Type::String + name: 'cloudRouterIpv6Address' + description: | + IPv6 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment. + - !ruby/object:Api::Type::String + name: 'customerRouterIpv6Address' + description: | + IPv6 address + prefix length to be configured on the customer router subinterface for this interconnect attachment. + - !ruby/object:Api::Type::Array + name: 'candidateIpv6Subnets' + description: | + This field is not available. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'cloudRouterIpv6InterfaceId' + description: | + This field is not available. + - !ruby/object:Api::Type::String + name: 'customerRouterIpv6InterfaceId' + description: | + This field is not available. + - !ruby/object:Api::Type::Integer + name: 'subnetLength' + description: | + integer Length of the IPv4 subnet mask. Allowed values: 29 (default) 30 The default value is 29, except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure remote location fall into this category. In these cases, the default value is 30, and requesting 29 returns an error. 
Where both 29 and 30 are allowed, 29 is preferred, because it gives Google Cloud Support more debugging visibility.
+      - !ruby/object:Api::Type::String
+        name: 'remoteService'
+        description: |
+          If the attachment is on a Cross-Cloud Interconnect connection, this field contains the interconnect's remote location service provider. Example values: "Amazon Web Services" "Microsoft Azure". The field is set only for attachments on Cross-Cloud Interconnect connections. Its value is copied from the InterconnectRemoteLocation remoteService field.
+      - !ruby/object:Api::Type::NestedObject
+        name: 'configurationConstraints'
+        description: |
+          object Constraints for this attachment, if any. The attachment does not work if these constraints are not met.
+        properties:
+          - !ruby/object:Api::Type::Enum
+            name: 'bgpMd5'
+            description: |
+              Whether the attachment's BGP session requires/allows/disallows BGP MD5 authentication. This can take one of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. For example, a Cross-Cloud Interconnect connection to a remote cloud provider that requires BGP MD5 authentication has the interconnectRemoteLocation attachmentConfigurationConstraints.bgp_md5 field set to MD5_REQUIRED, and that property is propagated to the attachment. Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 is requested.
+            values:
+              - :MD5_OPTIONAL
+              - :MD5_REQUIRED
+              - :MD5_UNSUPPORTED
+          - !ruby/object:Api::Type::Array
+            name: 'bgpPeerAsnRanges'
+            description: |
+              object interconnectAttachments.list of ASN ranges that the remote location is known to support. Formatted as an array of inclusive ranges {min: min-value, max: max-value}. For example, [{min: 123, max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or anything in the range 64512-65534. This field is only advisory. Although the API accepts other ranges, these are the ranges that we recommend.
+            item_type: !ruby/object:Api::Type::NestedObject
+              properties:
+                - !ruby/object:Api::Type::Integer
+                  name: 'min'
+                  description: |
+                    integer (uint32 format)
+                - !ruby/object:Api::Type::Integer
+                  name: 'max'
+                  description: |
+                    integer (uint32 format)
+
+  - !ruby/object:Api::Resource
+    name: 'RegionSecurityPolicy'
+    kind: 'compute#regionsecuritypolicies'
+    base_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{securityPolicy}}'
+    collection_url_key: 'items'
+    input: true
+    has_self_link: true
+    description: |
+      Represents a regional Cloud Armor security policy that can be applied to filter traffic to regional resources.
+    properties:
+      - !ruby/object:Api::Type::String
+        name: 'kind'
+        description: |
+          [Output only] Type of the resource. Always compute#securityPolicy for security policies
+      - !ruby/object:Api::Type::String
+        name: 'id'
+        description: |
+          string (uint64 format) The unique identifier for the resource. This identifier is defined by the server.
+      - !ruby/object:Api::Type::String
+        name: 'creationTimestamp'
+        description: |
+          Creation timestamp in RFC3339 text format.
+      - !ruby/object:Api::Type::String
+        name: 'name'
+        description: |
+          Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Array + name: 'rules' + description: | + object A list of rules that belong to this policy. There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match "*" for srcIpRanges and for the networkMatch condition every field must be either match "*" or not set). If no rules are provided when creating a security policy, a default rule with action "allow" will be added. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + integer An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + - !ruby/object:Api::Type::NestedObject + name: 'match' + description: | + object A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'expr' + description: | + object User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing evaluateThreatIntelligence require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing evaluatePreconfiguredExpr('sourceiplist-*') require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies. + properties: + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::Enum + name: 'versionedExpr' + description: | + Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. + values: + - :SRC_IPS_V1 + + default_value: :SRC_IPS_V1 + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + object The configuration options available when specifying versionedExpr. 
This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. + properties: + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + description: | + CIDR IP address range. Maximum number of srcIpRanges allowed is 10. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'action' + description: | + The Action to perform when the rule is matched. The following are the valid actions: allow: allow access to target. deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. + - !ruby/object:Api::Type::Boolean + name: 'preview' + description: | + boolean If set to true, the specified action is not enforced. + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitOptions' + description: | + object Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitThreshold' + description: | + object Threshold at which to begin ratelimiting. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + integer Number of HTTP(S) requests for calculating the threshold. 
+ - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + integer Interval over which the threshold is computed. + - !ruby/object:Api::Type::String + name: 'conformAction' + description: | + Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. + - !ruby/object:Api::Type::String + name: 'exceedAction' + description: | + Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502, and redirect, where the redirect parameters come from exceedRedirectOptions below. The redirect action is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::NestedObject + name: 'exceedRedirectOptions' + description: | + object Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - !ruby/object:Api::Type::Enum + name: 'enforceOnKey' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". 
The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates.
+                values:
+                  - :ALL
+                  - :IP
+                  - :HTTP_HEADER
+                  - :XFF_IP
+                  - :HTTP_COOKIE
+                  - :HTTP_PATH
+                  - :SNI
+                  - :REGION_CODE
+                default_value: :ALL
+              - !ruby/object:Api::Type::String
+                name: 'enforceOnKeyName'
+                description: |
+                  Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.
+              - !ruby/object:Api::Type::Array
+                name: 'enforceOnKeyConfigs'
+                description: |
+                  object If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforceOnKeyConfigs. If enforceOnKeyConfigs is specified, enforceOnKey must not be specified.
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'enforceOnKeyType' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates. 
+                      values:
+                        - :ALL
+                        - :IP
+                        - :HTTP_HEADER
+                        - :XFF_IP
+                        - :HTTP_COOKIE
+                        - :HTTP_PATH
+                        - :SNI
+                        - :REGION_CODE
+                      default_value: :ALL
+                    - !ruby/object:Api::Type::String
+                      name: 'enforceOnKeyName'
+                      description: |
+                        Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.
+              - !ruby/object:Api::Type::NestedObject
+                name: 'banThreshold'
+                description: |
+                  object Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'.
+                properties:
+                  - !ruby/object:Api::Type::Integer
+                    name: 'count'
+                    description: |
+                      integer Number of HTTP(S) requests for calculating the threshold.
+                  - !ruby/object:Api::Type::Integer
+                    name: 'intervalSec'
+                    description: |
+                      integer Interval over which the threshold is computed.
+              - !ruby/object:Api::Type::Integer
+                name: 'banDurationSec'
+                description: |
+                  integer Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold.
+          - !ruby/object:Api::Type::NestedObject
+            name: 'headerAction'
+            description: |
+              object Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR.
+            properties:
+              - !ruby/object:Api::Type::Array
+                name: 'requestHeadersToAdds'
+                description: |
+                  object The list of request headers to add or overwrite if they're already present.
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'headerName' + description: | + The name of the header to set. + - !ruby/object:Api::Type::String + name: 'headerValue' + description: | + The value to set the named header to. + - !ruby/object:Api::Type::NestedObject + name: 'redirectOptions' + description: | + object Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - !ruby/object:Api::Type::NestedObject + name: 'preconfiguredWafConfig' + description: | + object Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. + properties: + - !ruby/object:Api::Type::Array + name: 'exclusions' + description: | + object A list of exclusions to apply during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'targetRuleSet' + description: | + Target WAF rule set to apply the preconfigured WAF exclusion. + - !ruby/object:Api::Type::Array + name: 'targetRuleIds' + description: | + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'requestHeadersToExclude' + description: | + object A list of request header names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestCookiesToExclude' + description: | + object A list of request cookie names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestQueryParamsToExclude' + description: | + object A list of request query parameter names whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestUrisToExclude' + description: | + object A list of request URIs from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. 
+                item_type: !ruby/object:Api::Type::NestedObject
+                  properties:
+                    - !ruby/object:Api::Type::String
+                      name: 'val'
+                      description: |
+                        The value of the field.
+                    - !ruby/object:Api::Type::Enum
+                      name: 'op'
+                      description: |
+                        The match operator for the field.
+                      values:
+                        - :VALUE_1
+                      default_value: :VALUE_1
+      - !ruby/object:Api::Type::NestedObject
+        name: 'adaptiveProtectionConfig'
+        description: |
+          object
+        properties:
+          - !ruby/object:Api::Type::NestedObject
+            name: 'layer7DdosDefenseConfig'
+            description: |
+              object If set to true, enables Cloud Armor Machine Learning.
+            properties:
+              - !ruby/object:Api::Type::Boolean
+                name: 'enable'
+                description: |
+                  boolean If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.
+              - !ruby/object:Api::Type::Enum
+                name: 'ruleVisibility'
+                description: |
+                  Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR.
+                values:
+                  - :STANDARD
+                  - :PREMIUM
+                default_value: :STANDARD
+              - !ruby/object:Api::Type::Array
+                name: 'thresholdConfigs'
+                description: |
+                  object Configuration options for layer7 adaptive protection for various customizable thresholds.
+                item_type: !ruby/object:Api::Type::NestedObject
+                  properties:
+                    - !ruby/object:Api::Type::String
+                      name: 'name'
+                      description: |
+                        The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.
+ - !ruby/object:Api::Type::Integer + name: 'autoDeployLoadThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployConfidenceThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployImpactedBaselineThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployExpirationSec' + description: | + integer + - !ruby/object:Api::Type::NestedObject + name: 'ddosProtectionConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'ddosProtection' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'advancedOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'jsonParsing' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'jsonCustomConfig' + description: | + object Custom configuration to apply the JSON parsing. Only applicable when jsonParsing is set to STANDARD. + properties: + - !ruby/object:Api::Type::Array + name: 'contentTypes' + description: | + A list of custom Content-Type header values to apply the JSON parsing. As per RFC 1341, a Content-Type header value has the following format: Content-Type := type "/" subtype *[";" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'logLevel' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'recaptchaOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::String + name: 'redirectSiteKey' + description: | + An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + string (bytes format) Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + Server-defined URL for the resource. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type indicates the intended use of the security policy. CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. 
CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. + values: + - :CLOUD_ARMOR + - :CLOUD_ARMOR_EDGE + - :CLOUD_ARMOR_INTERNAL_SERVICE + - :CLOUD_ARMOR_NETWORK + + default_value: :CLOUD_ARMOR + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + map (key: string, value: string) Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'string' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + string (bytes format) A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. 
+ - !ruby/object:Api::Type::String + name: 'region' + description: | + URL of the region where the regional security policy resides. This field is not applicable to global security policies. + + + + - !ruby/object:Api::Resource + name: 'RegionSecurityPolicy' + kind: 'compute#regionsecuritypolicies' + base_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{securityPolicy}}' + collection_url_key: 'items' + input: true + has_self_link: true + description: | + An instance is a virtual machine (VM) hosted on Google's infrastructure. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyfor security policies + - !ruby/object:Api::Type::String + name: 'id' + description: | + string (uint64 format) The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Array + name: 'rules' + description: | + object A list of rules that belong to this policy. 
There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match "*" for srcIpRanges and for the networkMatch condition every field must be either match "*" or not set). If no rules are provided when creating a security policy, a default rule with action "allow" will be added. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + integer An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + - !ruby/object:Api::Type::NestedObject + name: 'match' + description: | + object A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'expr' + description: | + object User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing evaluateThreatIntelligence require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing evaluatePreconfiguredExpr('sourceiplist-*') require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies. 
+ properties: + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::Enum + name: 'versionedExpr' + description: | + Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. + values: + - :SRC_IPS_V1 + + default_value: :SRC_IPS_V1 + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + object The configuration options available when specifying versionedExpr. This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. + properties: + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + description: | + CIDR IP address range. Maximum number of srcIpRanges allowed is 10. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'action' + description: | + The Action to perform when the rule is matched. The following are the valid actions: allow: allow access to target. deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. 
rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. + - !ruby/object:Api::Type::Boolean + name: 'preview' + description: | + boolean If set to true, the specified action is not enforced. + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitOptions' + description: | + object Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitThreshold' + description: | + object Threshold at which to begin ratelimiting. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + integer Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + integer Interval over which the threshold is computed. + - !ruby/object:Api::Type::String + name: 'conformAction' + description: | + Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. + - !ruby/object:Api::Type::String + name: 'exceedAction' + description: | + Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. 
Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502, and redirect, where the redirect parameters come from exceedRedirectOptions below. The redirect action is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::NestedObject + name: 'exceedRedirectOptions' + description: | + object Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - !ruby/object:Api::Type::Enum + name: 'enforceOnKey' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. 
If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates. + values: + - :ALL + - :IP + - :HTTP_HEADER + - :XFF_IP + - :HTTP_COOKIE + - :HTTP_PATH + - :SNI + - :REGION_CODE + + default_value: :ALL + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::Array + name: 'enforceOnKeyConfigs' + description: | + object If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforceOnKeyConfigs. If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'enforceOnKeyType' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". 
The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates. + values: + - :ALL + - :IP + - :HTTP_HEADER + - :XFF_IP + - :HTTP_COOKIE + - :HTTP_PATH + - :SNI + - :REGION_CODE + + default_value: :ALL + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::NestedObject + name: 'banThreshold' + description: | + object Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + integer Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + integer Interval over which the threshold is computed. + - !ruby/object:Api::Type::Integer + name: 'banDurationSec' + description: | + integer Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. + - !ruby/object:Api::Type::NestedObject + name: 'headerAction' + description: | + object Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Array + name: 'requestHeadersToAdds' + description: | + object The list of request headers to add or overwrite if they're already present. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'headerName' + description: | + The name of the header to set. + - !ruby/object:Api::Type::String + name: 'headerValue' + description: | + The value to set the named header to. + - !ruby/object:Api::Type::NestedObject + name: 'redirectOptions' + description: | + object Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. 
+ - !ruby/object:Api::Type::NestedObject + name: 'preconfiguredWafConfig' + description: | + object Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. + properties: + - !ruby/object:Api::Type::Array + name: 'exclusions' + description: | + object A list of exclusions to apply during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'targetRuleSet' + description: | + Target WAF rule set to apply the preconfigured WAF exclusion. + - !ruby/object:Api::Type::Array + name: 'targetRuleIds' + description: | + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'requestHeadersToExclude' + description: | + object A list of request header names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestCookiesToExclude' + description: | + object A list of request cookie names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. 
+ values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestQueryParamsToExclude' + description: | + object A list of request query parameter names whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestUrisToExclude' + description: | + object A list of request URIs from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'adaptiveProtectionConfig' + description: | + object + properties: + - !ruby/object:Api::Type::NestedObject + name: 'layer7DdosDefenseConfig' + description: | + object If set to true, enables Cloud Armor Machine Learning. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enable' + description: | + boolean If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::Enum + name: 'ruleVisibility' + description: | + Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR. 
+ values: + - :STANDARD + - :PREMIUM + + default_value: :STANDARD + - !ruby/object:Api::Type::Array + name: 'thresholdConfigs' + description: | + object Configuration options for layer7 adaptive protection for various customizable thresholds. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy. + - !ruby/object:Api::Type::Integer + name: 'autoDeployLoadThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployConfidenceThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployImpactedBaselineThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployExpirationSec' + description: | + integer + - !ruby/object:Api::Type::NestedObject + name: 'ddosProtectionConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'ddosProtection' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'advancedOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'jsonParsing' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'jsonCustomConfig' + description: | + object Custom configuration to apply the JSON parsing. Only applicable when jsonParsing is set to STANDARD. + properties: + - !ruby/object:Api::Type::Array + name: 'contentTypes' + description: | + A list of custom Content-Type header values to apply the JSON parsing. 
As per RFC 1341, a Content-Type header value has the following format: Content-Type := type "/" subtype *[";" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'logLevel' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'recaptchaOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::String + name: 'redirectSiteKey' + description: | + An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + string (bytes format) Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + Server-defined URL for the resource. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type indicates the intended use of the security policy. 
CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. + values: + - :CLOUD_ARMOR + - :CLOUD_ARMOR_EDGE + - :CLOUD_ARMOR_INTERNAL_SERVICE + - :CLOUD_ARMOR_NETWORK + + default_value: :CLOUD_ARMOR + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + map (key: string, value: string) Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'string' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + string (bytes format) A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. + - !ruby/object:Api::Type::String + name: 'region' + description: | + URL of the region where the regional security policy resides. This field is not applicable to global security policies. + + + + - !ruby/object:Api::Resource + name: 'RegionSecurityPolicy' + kind: 'compute#regionsecuritypolicies' + base_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{securityPolicy}}' + collection_url_key: 'items' + input: true + has_self_link: true + description: | + An instance is a virtual machine (VM) hosted on Google's infrastructure. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyfor security policies + - !ruby/object:Api::Type::String + name: 'id' + description: | + string (uint64 format) The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Array + name: 'rules' + description: | + object A list of rules that belong to this policy. 
There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match "*" for srcIpRanges and for the networkMatch condition every field must be either match "*" or not set). If no rules are provided when creating a security policy, a default rule with action "allow" will be added. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + integer An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + - !ruby/object:Api::Type::NestedObject + name: 'match' + description: | + object A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'expr' + description: | + object User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing evaluateThreatIntelligence require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing evaluatePreconfiguredExpr('sourceiplist-*') require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies. 
+ properties: + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::Enum + name: 'versionedExpr' + description: | + Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. + values: + - :SRC_IPS_V1 + + default_value: :SRC_IPS_V1 + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + object The configuration options available when specifying versionedExpr. This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. + properties: + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + description: | + CIDR IP address range. Maximum number of srcIpRanges allowed is 10. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'action' + description: | + The Action to perform when the rule is matched. The following are the valid actions: allow: allow access to target. deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. 
rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. + - !ruby/object:Api::Type::Boolean + name: 'preview' + description: | + boolean If set to true, the specified action is not enforced. + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitOptions' + description: | + object Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitThreshold' + description: | + object Threshold at which to begin ratelimiting. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + integer Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + integer Interval over which the threshold is computed. + - !ruby/object:Api::Type::String + name: 'conformAction' + description: | + Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. + - !ruby/object:Api::Type::String + name: 'exceedAction' + description: | + Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. 
Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502, and redirect, where the redirect parameters come from exceedRedirectOptions below. The redirect action is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::NestedObject + name: 'exceedRedirectOptions' + description: | + object Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - !ruby/object:Api::Type::Enum + name: 'enforceOnKey' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. 
If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates. + values: + - :ALL + - :IP + - :HTTP_HEADER + - :XFF_IP + - :HTTP_COOKIE + - :HTTP_PATH + - :SNI + - :REGION_CODE + + default_value: :ALL + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::Array + name: 'enforceOnKeyConfigs' + description: | + object If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforceOnKeyConfigs. If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'enforceOnKeyType' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". 
The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates. + values: + - :ALL + - :IP + - :HTTP_HEADER + - :XFF_IP + - :HTTP_COOKIE + - :HTTP_PATH + - :SNI + - :REGION_CODE + + default_value: :ALL + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::NestedObject + name: 'banThreshold' + description: | + object Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + integer Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + integer Interval over which the threshold is computed. + - !ruby/object:Api::Type::Integer + name: 'banDurationSec' + description: | + integer Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. + - !ruby/object:Api::Type::NestedObject + name: 'headerAction' + description: | + object Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Array + name: 'requestHeadersToAdds' + description: | + object The list of request headers to add or overwrite if they're already present. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'headerName' + description: | + The name of the header to set. + - !ruby/object:Api::Type::String + name: 'headerValue' + description: | + The value to set the named header to. + - !ruby/object:Api::Type::NestedObject + name: 'redirectOptions' + description: | + object Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. 
+ - !ruby/object:Api::Type::NestedObject + name: 'preconfiguredWafConfig' + description: | + object Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. + properties: + - !ruby/object:Api::Type::Array + name: 'exclusions' + description: | + object A list of exclusions to apply during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'targetRuleSet' + description: | + Target WAF rule set to apply the preconfigured WAF exclusion. + - !ruby/object:Api::Type::Array + name: 'targetRuleIds' + description: | + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'requestHeadersToExclude' + description: | + object A list of request header names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestCookiesToExclude' + description: | + object A list of request cookie names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. 
+ values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestQueryParamsToExclude' + description: | + object A list of request query parameter names whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestUrisToExclude' + description: | + object A list of request URIs from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'adaptiveProtectionConfig' + description: | + object + properties: + - !ruby/object:Api::Type::NestedObject + name: 'layer7DdosDefenseConfig' + description: | + object If set to true, enables Cloud Armor Machine Learning. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enable' + description: | + boolean If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::Enum + name: 'ruleVisibility' + description: | + Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR. 
+ values: + - :STANDARD + - :PREMIUM + - :CLOUD_ARMOR + + default_value: :STANDARD + - !ruby/object:Api::Type::Array + name: 'thresholdConfigs' + description: | + object Configuration options for layer7 adaptive protection for various customizable thresholds. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy. + - !ruby/object:Api::Type::Integer + name: 'autoDeployLoadThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployConfidenceThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployImpactedBaselineThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployExpirationSec' + description: | + integer + - !ruby/object:Api::Type::NestedObject + name: 'ddosProtectionConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'ddosProtection' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'advancedOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'jsonParsing' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'jsonCustomConfig' + description: | + object Custom configuration to apply the JSON parsing. Only applicable when jsonParsing is set to STANDARD. + properties: + - !ruby/object:Api::Type::Array + name: 'contentTypes' + description: | + A list of custom Content-Type header values to apply the JSON parsing. 
As per RFC 1341, a Content-Type header value has the following format: Content-Type := type "/" subtype *[";" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'logLevel' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'recaptchaOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::String + name: 'redirectSiteKey' + description: | + An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + string (bytes format) Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + Server-defined URL for the resource. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type indicates the intended use of the security policy. 
CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. + values: + - :CLOUD_ARMOR + - :CLOUD_ARMOR_EDGE + - :CLOUD_ARMOR_INTERNAL_SERVICE + - :CLOUD_ARMOR_NETWORK + + default_value: :CLOUD_ARMOR + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + map (key: string, value: string) Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'string' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + string (bytes format) A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. + - !ruby/object:Api::Type::String + name: 'region' + description: | + URL of the region where the regional security policy resides. This field is not applicable to global security policies. + + + + - !ruby/object:Api::Resource + name: 'RegionSecurityPolicy' + kind: 'compute#regionsecuritypolicies' + base_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{securityPolicy}}' + collection_url_key: 'items' + input: true + has_self_link: true + description: | + Represents a regional Cloud Armor security policy resource, used to control and filter incoming traffic to regional resources. + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicy for security policies + - !ruby/object:Api::Type::String + name: 'id' + description: | + string (uint64 format) The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Array + name: 'rules' + description: | + object A list of rules that belong to this policy. 
There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match "*" for srcIpRanges and for the networkMatch condition every field must be either match "*" or not set). If no rules are provided when creating a security policy, a default rule with action "allow" will be added. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + integer An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + - !ruby/object:Api::Type::NestedObject + name: 'match' + description: | + object A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'expr' + description: | + object User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing evaluateThreatIntelligence require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing evaluatePreconfiguredExpr('sourceiplist-*') require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies. 
+ properties: + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::Enum + name: 'versionedExpr' + description: | + Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. + values: + - :SRC_IPS_V1 + + default_value: :SRC_IPS_V1 + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + object The configuration options available when specifying versionedExpr. This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. + properties: + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + description: | + CIDR IP address range. Maximum number of srcIpRanges allowed is 10. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'action' + description: | + The Action to perform when the rule is matched. The following are the valid actions: allow: allow access to target. deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. 
rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. + - !ruby/object:Api::Type::Boolean + name: 'preview' + description: | + boolean If set to true, the specified action is not enforced. + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitOptions' + description: | + object Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitThreshold' + description: | + object Threshold at which to begin ratelimiting. + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + integer Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + integer Interval over which the threshold is computed. + - !ruby/object:Api::Type::String + name: 'conformAction' + description: | + Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. + - !ruby/object:Api::Type::String + name: 'exceedAction' + description: | + Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. 
Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502, and redirect, where the redirect parameters come from exceedRedirectOptions below. The redirect action is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::NestedObject + name: 'exceedRedirectOptions' + description: | + object Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - !ruby/object:Api::Type::Enum + name: 'enforceOnKey' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. 
If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates. + values: + - :ALL + - :IP + - :HTTP_HEADER + - :XFF_IP + - :HTTP_COOKIE + - :HTTP_PATH + - :SNI + - :REGION_CODE + + default_value: :ALL + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::Array + name: 'enforceOnKeyConfigs' + description: | + object If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforceOnKeyConfigs. If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'enforceOnKeyType' + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. IP: The source IP address of the request is the key. Each IP has this limit enforced separately. HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". 
The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. REGION_CODE: The country/region from which the request originates. + values: + - :ALL + - :IP + - :HTTP_HEADER + - :XFF_IP + - :HTTP_COOKIE + - :HTTP_PATH + - :SNI + - :REGION_CODE + + default_value: :ALL + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::NestedObject + name: 'banThreshold' + description: | + object Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + integer Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + integer Interval over which the threshold is computed. + - !ruby/object:Api::Type::Integer + name: 'banDurationSec' + description: | + integer Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. + - !ruby/object:Api::Type::NestedObject + name: 'headerAction' + description: | + object Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Array + name: 'requestHeadersToAdds' + description: | + object The list of request headers to add or overwrite if they're already present. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'headerName' + description: | + The name of the header to set. + - !ruby/object:Api::Type::String + name: 'headerValue' + description: | + The value to set the named header to. + - !ruby/object:Api::Type::NestedObject + name: 'redirectOptions' + description: | + object Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. 
+ - !ruby/object:Api::Type::NestedObject + name: 'preconfiguredWafConfig' + description: | + object Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. + properties: + - !ruby/object:Api::Type::Array + name: 'exclusions' + description: | + object A list of exclusions to apply during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'targetRuleSet' + description: | + Target WAF rule set to apply the preconfigured WAF exclusion. + - !ruby/object:Api::Type::Array + name: 'targetRuleIds' + description: | + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'requestHeadersToExclude' + description: | + object A list of request header names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestCookiesToExclude' + description: | + object A list of request cookie names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. 
+ values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestQueryParamsToExclude' + description: | + object A list of request query parameter names whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::Array + name: 'requestUrisToExclude' + description: | + object A list of request URIs from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'adaptiveProtectionConfig' + description: | + object + properties: + - !ruby/object:Api::Type::NestedObject + name: 'layer7DdosDefenseConfig' + description: | + object If set to true, enables Cloud Armor Machine Learning. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enable' + description: | + boolean If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::Enum + name: 'ruleVisibility' + description: | + Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR. 
+ values: + - :STANDARD + - :PREMIUM + - :CLOUD_ARMOR + + default_value: :STANDARD + - !ruby/object:Api::Type::Array + name: 'thresholdConfigs' + description: | + object Configuration options for layer7 adaptive protection for various customizable thresholds. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy. + - !ruby/object:Api::Type::Integer + name: 'autoDeployLoadThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployConfidenceThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployImpactedBaselineThreshold' + description: | + number + - !ruby/object:Api::Type::Integer + name: 'autoDeployExpirationSec' + description: | + integer + - !ruby/object:Api::Type::NestedObject + name: 'ddosProtectionConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'ddosProtection' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'advancedOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::Enum + name: 'jsonParsing' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'jsonCustomConfig' + description: | + object Custom configuration to apply the JSON parsing. Only applicable when jsonParsing is set to STANDARD. + properties: + - !ruby/object:Api::Type::Array + name: 'contentTypes' + description: | + A list of custom Content-Type header values to apply the JSON parsing. 
As per RFC 1341, a Content-Type header value has the following format: Content-Type := type "/" subtype *[";" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'logLevel' + description: | + + values: + - :VALUE_1 + + default_value: :VALUE_1 + - !ruby/object:Api::Type::NestedObject + name: 'recaptchaOptionsConfig' + description: | + object + properties: + - !ruby/object:Api::Type::String + name: 'redirectSiteKey' + description: | + An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + string (bytes format) Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + Server-defined URL for the resource. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type indicates the intended use of the security policy. 
CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. + values: + - :CLOUD_ARMOR + - :HTTP + - :CLOUD_ARMOR_EDGE + - :HTTP + - :CDN + - :CLOUD_ARMOR_INTERNAL_SERVICE + - :HTTP + - :CLOUD_ARMOR_NETWORK + + default_value: :CLOUD_ARMOR + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + map (key: string, value: string) Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. + properties: + - !ruby/object:Api::Type::String + name: 'string' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + string (bytes format) A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy. A base64-encoded string. + - !ruby/object:Api::Type::String + name: 'region' + description: | + URL of the region where the regional security policy resides. This field is not applicable to global security policies. + + + + + - !ruby/object:Api::Resource + name: RegionNotificationEndpoint + base_url: 'projects/{{project}}/regions/{{region}}/notificationEndpoints' + self_link: 'projects/{{project}}/regions/{{region}}/notificationEndpoints/{{notificationEndpoint}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a notification endpoint. A notification endpoint resource defines an endpoint to receive notifications when there are status changes detected by the associated health check service. For more information, see Health checks overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#notificationEndpoint for notification endpoints. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] A unique identifier for this resource type. The server generates this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. 
+ - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the notification endpoint resides. This field applies only to the regional resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + - !ruby/object:Api::Type::NestedObject + name: 'grpcSettings' + description: | + Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint. + properties: + - !ruby/object:Api::Type::String + name: 'endpoint' + description: | + Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. + - !ruby/object:Api::Type::Integer + name: 'retryDurationSec' + description: | + How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. + - !ruby/object:Api::Type::String + name: 'payloadName' + description: | + Optional. If specified, this field is used to populate the "name" field in gRPC requests. 
+ - !ruby/object:Api::Type::String + name: 'authority' + description: | + Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 + - !ruby/object:Api::Type::NestedObject + name: 'resendInterval' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + + + + + - !ruby/object:Api::Resource + name: RegionNotificationEndpoint + base_url: 'projects/{{project}}/regions/{{region}}/notificationEndpoints' + self_link: 'projects/{{project}}/regions/{{region}}/notificationEndpoints/{{notificationEndpoint}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a notification endpoint. 
A notification endpoint resource defines an endpoint to receive notifications when there are status changes detected by the associated health check service. For more information, see Health checks overview. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#notificationEndpoint for notification endpoints. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] A unique identifier for this resource type. The server generates this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the notification endpoint resides. This field applies only to the regional resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. 
+ - !ruby/object:Api::Type::NestedObject + name: 'grpcSettings' + description: | + Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint. + properties: + - !ruby/object:Api::Type::String + name: 'endpoint' + description: | + Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. + - !ruby/object:Api::Type::Integer + name: 'retryDurationSec' + description: | + How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. + - !ruby/object:Api::Type::String + name: 'payloadName' + description: | + Optional. If specified, this field is used to populate the "name" field in gRPC requests. + - !ruby/object:Api::Type::String + name: 'authority' + description: | + Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 + - !ruby/object:Api::Type::NestedObject + name: 'resendInterval' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. 
+ + + + + - !ruby/object:Api::Resource + name: Reservation + base_url: 'projects/{{project}}/zones/{{zone}}/reservations' + self_link: 'projects/{{project}}/zones/{{zone}}/reservations/{{reservation}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#reservations for reservations. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined fully-qualified URL for this resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::NestedObject + name: 'specificReservation' + description: | + This reservation type allows to pre allocate specific instance configuration. Next ID: 6 + properties: + - !ruby/object:Api::Type::NestedObject + name: 'instanceProperties' + description: | + Properties of the SKU instances being reserved. Next ID: 9 + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. + - !ruby/object:Api::Type::Array + name: 'guestAccelerators' + description: | + Specifies accelerator type and count. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. 
+ - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the guest accelerator cards exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Minimum cpu platform the reservation. + - !ruby/object:Api::Type::Array + name: 'localSsds' + description: | + Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + Specifies the size of the disk in base-2 GB. + - !ruby/object:Api::Type::Enum + name: 'interface' + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. + values: + - :NVDIMM + - :NVME + - :SCSI + - !ruby/object:Api::Type::Integer + name: 'maintenanceFreezeDurationHours' + description: | + Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. + - !ruby/object:Api::Type::String + name: 'locationHint' + description: | + An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. + - !ruby/object:Api::Type::Enum + name: 'maintenanceInterval' + description: | + Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. + values: + - :AS_NEEDED + - :PERIODIC + - :RECURRENT + - !ruby/object:Api::Type::String + name: 'count' + description: | + Specifies the number of resources that are allocated. + - !ruby/object:Api::Type::String + name: 'inUseCount' + description: | + [Output Only] Indicates how many instances are in use. 
+ - !ruby/object:Api::Type::String + name: 'assuredCount' + description: | + [Output Only] Indicates how many instances are actually usable currently. + - !ruby/object:Api::Type::String + name: 'sourceInstanceTemplate' + description: | + Specifies the instance template to create the reservation. If you use this field, you must exclude the instanceProperties field. This field is optional, and it can be a full or partial URL. For example, the following are all valid URLs to an instance template: - https://www.googleapis.com/compute/v1/projects/project /global/instanceTemplates/instanceTemplate - projects/project/global/instanceTemplates/instanceTemplate - global/instanceTemplates/instanceTemplate + - !ruby/object:Api::Type::NestedObject + name: 'aggregateReservation' + description: | + This reservation type is specified by total resource amounts (e.g. total count of CPUs) and can account for multiple instance SKUs. In other words, one can create instances of varying shapes against this reservation. + properties: + - !ruby/object:Api::Type::Enum + name: 'vmFamily' + description: | + The VM family that all instances scheduled against this reservation must belong to. + values: + - :VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L + - :VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP + - :VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P + - !ruby/object:Api::Type::Array + name: 'reservedResources' + description: | + List of reserved resources (CPUs, memory, accelerators). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'accelerator' + description: | + + properties: + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + Number of accelerators of specified type. + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL to accelerator type. e.g. 
"projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l" + - !ruby/object:Api::Type::Array + name: 'inUseResources' + description: | + [Output only] List of resources currently in use. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'accelerator' + description: | + + properties: + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + Number of accelerators of specified type. + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL to accelerator type. e.g. "projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l" + - !ruby/object:Api::Type::Enum + name: 'workloadType' + description: | + The workload type of the instances that will target this reservation. + values: + - :BATCH + - :SERVING + - :UNSPECIFIED + - !ruby/object:Api::Type::String + name: 'commitment' + description: | + [Output Only] Full or partial URL to a parent commitment. This field displays for reservations that are tied to a commitment. + - !ruby/object:Api::Type::Boolean + name: 'specificReservationRequired' + description: | + Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of the reservation. + values: + - :CREATING + - :DELETING + - :INVALID + - :READY + - :UPDATING + - !ruby/object:Api::Type::NestedObject + name: 'shareSettings' + description: | + The share setting for reservations and sole tenancy node groups. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'shareType' + description: | + Type of sharing for this shared-reservation + values: + - :DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS + - :LOCAL + - :ORGANIZATION + - :SHARE_TYPE_UNSPECIFIED + - :SPECIFIC_PROJECTS + - !ruby/object:Api::Type::Array + name: 'projects' + description: | + A List of Project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'projectMap' + description: | + A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Config for each project in the share settings. + - !ruby/object:Api::Type::NestedObject + name: 'folderMap' + description: | + A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. Folder id should be a string of number, and without "folders/" prefix. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Config for each folder in the share settings. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + [Output Only] Reserved for future use. + - !ruby/object:Api::Type::NestedObject + name: 'resourcePolicies' + description: | + Resource policies to be added to this reservation. The key is defined by user, and the value is resource policy url. This is to define placement policy with reservation. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'resourceStatus' + description: | + [Output Only] Contains output only fields. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'specificSkuAllocation' + description: | + Contains Properties set for the reservation. + properties: + - !ruby/object:Api::Type::String + name: 'sourceInstanceTemplateId' + description: | + ID of the instance template used to populate reservation properties. + - !ruby/object:Api::Type::String + name: 'deleteAtTime' + description: | + Absolute time in future when the reservation will be auto-deleted by Compute Engine. Timestamp is represented in RFC3339 text format. + - !ruby/object:Api::Type::NestedObject + name: 'deleteAfterDuration' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. 
+ + + + + - !ruby/object:Api::Resource + name: Reservation + base_url: 'projects/{{project}}/zones/{{zone}}/reservations' + self_link: 'projects/{{project}}/zones/{{zone}}/reservations/{{reservation}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#reservations for reservations. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined fully-qualified URL for this resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::NestedObject + name: 'specificReservation' + description: | + This reservation type allows to pre allocate specific instance configuration. Next ID: 6 + properties: + - !ruby/object:Api::Type::NestedObject + name: 'instanceProperties' + description: | + Properties of the SKU instances being reserved. Next ID: 9 + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. + - !ruby/object:Api::Type::Array + name: 'guestAccelerators' + description: | + Specifies accelerator type and count. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. 
+ - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the guest accelerator cards exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Minimum cpu platform the reservation. + - !ruby/object:Api::Type::Array + name: 'localSsds' + description: | + Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'diskSizeGb' + description: | + Specifies the size of the disk in base-2 GB. + - !ruby/object:Api::Type::Enum + name: 'interface' + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. + values: + - :NVDIMM + - :NVME + - :SCSI + - !ruby/object:Api::Type::Integer + name: 'maintenanceFreezeDurationHours' + description: | + Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. + - !ruby/object:Api::Type::String + name: 'locationHint' + description: | + An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. + - !ruby/object:Api::Type::Enum + name: 'maintenanceInterval' + description: | + Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. + values: + - :AS_NEEDED + - :PERIODIC + - :RECURRENT + - !ruby/object:Api::Type::String + name: 'count' + description: | + Specifies the number of resources that are allocated. + - !ruby/object:Api::Type::String + name: 'inUseCount' + description: | + [Output Only] Indicates how many instances are in use. 
+ - !ruby/object:Api::Type::String + name: 'assuredCount' + description: | + [Output Only] Indicates how many instances are actually usable currently. + - !ruby/object:Api::Type::String + name: 'sourceInstanceTemplate' + description: | + Specifies the instance template to create the reservation. If you use this field, you must exclude the instanceProperties field. This field is optional, and it can be a full or partial URL. For example, the following are all valid URLs to an instance template: - https://www.googleapis.com/compute/v1/projects/project /global/instanceTemplates/instanceTemplate - projects/project/global/instanceTemplates/instanceTemplate - global/instanceTemplates/instanceTemplate + - !ruby/object:Api::Type::NestedObject + name: 'aggregateReservation' + description: | + This reservation type is specified by total resource amounts (e.g. total count of CPUs) and can account for multiple instance SKUs. In other words, one can create instances of varying shapes against this reservation. + properties: + - !ruby/object:Api::Type::Enum + name: 'vmFamily' + description: | + The VM family that all instances scheduled against this reservation must belong to. + values: + - :VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L + - :VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP + - :VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P + - !ruby/object:Api::Type::Array + name: 'reservedResources' + description: | + List of reserved resources (CPUs, memory, accelerators). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'accelerator' + description: | + + properties: + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + Number of accelerators of specified type. + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL to accelerator type. e.g. 
"projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l" + - !ruby/object:Api::Type::Array + name: 'inUseResources' + description: | + [Output only] List of resources currently in use. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'accelerator' + description: | + + properties: + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + Number of accelerators of specified type. + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Full or partial URL to accelerator type. e.g. "projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l" + - !ruby/object:Api::Type::Enum + name: 'workloadType' + description: | + The workload type of the instances that will target this reservation. + values: + - :BATCH + - :SERVING + - :UNSPECIFIED + - !ruby/object:Api::Type::String + name: 'commitment' + description: | + [Output Only] Full or partial URL to a parent commitment. This field displays for reservations that are tied to a commitment. + - !ruby/object:Api::Type::Boolean + name: 'specificReservationRequired' + description: | + Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + [Output Only] The status of the reservation. + values: + - :CREATING + - :DELETING + - :INVALID + - :READY + - :UPDATING + - !ruby/object:Api::Type::NestedObject + name: 'shareSettings' + description: | + The share setting for reservations and sole tenancy node groups. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'shareType' + description: | + Type of sharing for this shared-reservation + values: + - :DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS + - :LOCAL + - :ORGANIZATION + - :SHARE_TYPE_UNSPECIFIED + - :SPECIFIC_PROJECTS + - !ruby/object:Api::Type::Array + name: 'projects' + description: | + A List of Project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'projectMap' + description: | + A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Config for each project in the share settings. + - !ruby/object:Api::Type::NestedObject + name: 'folderMap' + description: | + A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. Folder id should be a string of number, and without "folders/" prefix. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Config for each folder in the share settings. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + [Output Only] Reserved for future use. + - !ruby/object:Api::Type::NestedObject + name: 'resourcePolicies' + description: | + Resource policies to be added to this reservation. The key is defined by user, and the value is resource policy url. This is to define placement policy with reservation. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'resourceStatus' + description: | + [Output Only] Contains output only fields. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'specificSkuAllocation' + description: | + Contains Properties set for the reservation. + properties: + - !ruby/object:Api::Type::String + name: 'sourceInstanceTemplateId' + description: | + ID of the instance template used to populate reservation properties. + - !ruby/object:Api::Type::String + name: 'deleteAtTime' + description: | + Absolute time in future when the reservation will be auto-deleted by Compute Engine. Timestamp is represented in RFC3339 text format. + - !ruby/object:Api::Type::NestedObject + name: 'deleteAfterDuration' + description: | + A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. + properties: + - !ruby/object:Api::Type::String + name: 'seconds' + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. 
+ + + + + - !ruby/object:Api::Resource + name: RegionSecurityPolicy + base_url: 'projects/{{project}}/regions/{{region}}/securityPolicies' + self_link: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{securityPolicy}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Google Cloud Armor security policy resource. Only external backend services that use load balancers can reference a security policy. For more information, see Google Cloud Armor security policy overview. + properties: + + - !ruby/object:Api::Type::Array + name: 'userDefinedFields' + description: | + Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of this field. Must be unique within the policy. + - !ruby/object:Api::Type::Enum + name: 'base' + description: | + The base relative to which 'offset' is measured. Possible values are: - IPV4: Points to the beginning of the IPv4 header. - IPV6: Points to the beginning of the IPv6 header. 
- TCP: Points to the beginning of the TCP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. - UDP: Points to the beginning of the UDP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. required + values: + - :IPV4 + - :IPV6 + - :TCP + - :UDP + - !ruby/object:Api::Type::Integer + name: 'offset' + description: | + Offset of the first byte of the field (in network byte order) relative to 'base'. + - !ruby/object:Api::Type::Integer + name: 'size' + description: | + Size of the field in bytes. Valid values: 1-4. + - !ruby/object:Api::Type::String + name: 'mask' + description: | + If specified, apply this mask (bitwise AND) to the field to ignore bits before matching. Encoded as a hexadecimal number (starting with "0x"). The last byte of the field (in network byte order) corresponds to the least significant byte of the mask. + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyfor security policies + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource. This identifier is defined by the server. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Array + name: 'rules' + description: | + A list of rules that belong to this policy. There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match "*" for srcIpRanges and for the networkMatch condition every field must be either match "*" or not set). If no rules are provided when creating a security policy, a default rule with action "allow" will be added. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + - !ruby/object:Api::Type::NestedObject + name: 'match' + description: | + Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'expr' + description: | + Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. + properties: + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. 
+ - !ruby/object:Api::Type::NestedObject + name: 'exprOptions' + description: | + + properties: + - !ruby/object:Api::Type::NestedObject + name: 'recaptchaOptions' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'actionTokenSiteKeys' + description: | + A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'sessionTokenSiteKeys' + description: | + A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'versionedExpr' + description: | + Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config. + values: + - :FIREWALL + - :SRC_IPS_V1 + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + description: | + CIDR IP address range. Maximum number of src_ip_ranges allowed is 10. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'destIpRanges' + description: | + CIDR IP address range. This field may only be specified when versioned_expr is set to FIREWALL. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'destPorts' + description: | + Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipProtocol' + description: | + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + - !ruby/object:Api::Type::Array + name: 'ports' + description: | + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'layer4Configs' + description: | + Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipProtocol' + description: | + The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. + - !ruby/object:Api::Type::Array + name: 'ports' + description: | + An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'networkMatch' + description: | + Represents a match condition that incoming network traffic is evaluated against. + properties: + - !ruby/object:Api::Type::Array + name: 'userDefinedFields' + description: | + User-defined fields. Each element names a defined field and lists the matching values for that field. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the user-defined field, as given in the definition. + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with "0x") number (e.g. "64") or range (e.g. "0x400-0x7ff"). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + description: | + Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'destIpRanges' + description: | + Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'ipProtocols' + description: | + IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. "6"), range (e.g. "253-254"), or one of the following protocol names: "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'srcPorts' + description: | + Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'destPorts' + description: | + Destination port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'srcRegionCodes' + description: | + Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'srcAsns' + description: | + BGP Autonomous System Number associated with the source IP address. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'action' + description: | + The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. + - !ruby/object:Api::Type::Boolean + name: 'preview' + description: | + If set to true, the specified action is not enforced. + - !ruby/object:Api::Type::Enum + name: 'direction' + description: | + The direction in which this rule applies. This field may only be specified when versioned_expr is set to FIREWALL. + values: + - :EGRESS + - :INGRESS + - !ruby/object:Api::Type::Array + name: 'targetResources' + description: | + A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. 
If this field is left blank, all VMs within the organization will receive the rule. This field may only be specified when versioned_expr is set to FIREWALL. + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'enableLogging' + description: | + Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. This field may only be specified when the versioned_expr is set to FIREWALL. + - !ruby/object:Api::Type::Integer + name: 'ruleTupleCount' + description: | + [Output Only] Calculation of the complexity of a single firewall security policy rule. + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitOptions' + description: | + + properties: + - !ruby/object:Api::Type::NestedObject + name: 'rateLimitThreshold' + description: | + + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + Interval over which the threshold is computed. + - !ruby/object:Api::Type::String + name: 'conformAction' + description: | + Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. + - !ruby/object:Api::Type::String + name: 'exceedAction' + description: | + Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR. 
+ - !ruby/object:Api::Type::NestedObject + name: 'exceedRedirectOptions' + description: | + + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :EXTERNAL_302 + - :GOOGLE_RECAPTCHA + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - !ruby/object:Api::Type::NestedObject + name: 'exceedActionRpcStatus' + description: | + Simplified google.rpc.Status type (omitting details). + properties: + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. + - !ruby/object:Api::Type::Enum + name: 'enforceOnKey' + description: | + Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. 
If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. + values: + - :ALL + - :ALL_IPS + - :HTTP_COOKIE + - :HTTP_HEADER + - :HTTP_PATH + - :IP + - :REGION_CODE + - :SNI + - :TLS_JA3_FINGERPRINT + - :USER_IP + - :XFF_IP + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::Array + name: 'enforceOnKeyConfigs' + description: | + If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforce_on_key_configs. If enforce_on_key_configs is specified, enforce_on_key must not be specified. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'enforceOnKeyType' + description: | + Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. 
This is the default value if "enforceOnKeyConfigs" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. 
+ values: + - :ALL + - :ALL_IPS + - :HTTP_COOKIE + - :HTTP_HEADER + - :HTTP_PATH + - :IP + - :REGION_CODE + - :SNI + - :TLS_JA3_FINGERPRINT + - :USER_IP + - :XFF_IP + - !ruby/object:Api::Type::String + name: 'enforceOnKeyName' + description: | + Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - !ruby/object:Api::Type::NestedObject + name: 'banThreshold' + description: | + + properties: + - !ruby/object:Api::Type::Integer + name: 'count' + description: | + Number of HTTP(S) requests for calculating the threshold. + - !ruby/object:Api::Type::Integer + name: 'intervalSec' + description: | + Interval over which the threshold is computed. + - !ruby/object:Api::Type::Integer + name: 'banDurationSec' + description: | + Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. + - !ruby/object:Api::Type::Array + name: 'targetServiceAccounts' + description: | + A list of service accounts indicating the sets of instances that are applied with this rule. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'ruleNumber' + description: | + Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation, if rule number is not specified it will be generated by the server. + - !ruby/object:Api::Type::String + name: 'redirectTarget' + description: | + This must be specified for redirect actions. Cannot be specified for any other actions. 
+ - !ruby/object:Api::Type::NestedObject + name: 'headerAction' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'requestHeadersToAdds' + description: | + The list of request headers to add or overwrite if they're already present. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'headerName' + description: | + The name of the header to set. + - !ruby/object:Api::Type::String + name: 'headerValue' + description: | + The value to set the named header to. + - !ruby/object:Api::Type::NestedObject + name: 'redirectOptions' + description: | + + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of the redirect action. + values: + - :EXTERNAL_302 + - :GOOGLE_RECAPTCHA + - !ruby/object:Api::Type::String + name: 'target' + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - !ruby/object:Api::Type::Enum + name: 'ruleManagedProtectionTier' + description: | + [Output Only] The minimum managed protection tier required for this rule. [Deprecated] Use requiredManagedProtectionTiers instead. + values: + - :CAMP_PLUS_ANNUAL + - :CAMP_PLUS_PAYGO + - :CA_STANDARD + - !ruby/object:Api::Type::NestedObject + name: 'preconfiguredWafConfig' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'exclusions' + description: | + A list of exclusions to apply during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'targetRuleSet' + description: | + Target WAF rule set to apply the preconfigured WAF exclusion. + - !ruby/object:Api::Type::Array + name: 'targetRuleIds' + description: | + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'requestHeadersToExclude' + description: | + A list of request header names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :CONTAINS + - :ENDS_WITH + - :EQUALS + - :EQUALS_ANY + - :STARTS_WITH + - !ruby/object:Api::Type::Array + name: 'requestCookiesToExclude' + description: | + A list of request cookie names whose value will be excluded from inspection during preconfigured WAF evaluation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :CONTAINS + - :ENDS_WITH + - :EQUALS + - :EQUALS_ANY + - :STARTS_WITH + - !ruby/object:Api::Type::Array + name: 'requestQueryParamsToExclude' + description: | + A list of request query parameter names whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :CONTAINS + - :ENDS_WITH + - :EQUALS + - :EQUALS_ANY + - :STARTS_WITH + - !ruby/object:Api::Type::Array + name: 'requestUrisToExclude' + description: | + A list of request URIs from the request line to be excluded from inspection during preconfigured WAF evaluation. 
When specifying this field, the query or fragment part should be excluded. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'val' + description: | + The value of the field. + - !ruby/object:Api::Type::Enum + name: 'op' + description: | + The match operator for the field. + values: + - :CONTAINS + - :ENDS_WITH + - :EQUALS + - :EQUALS_ANY + - :STARTS_WITH + - !ruby/object:Api::Type::NestedObject + name: 'cloudArmorConfig' + description: | + Configuration options for Cloud Armor. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableMl' + description: | + If set to true, enables Cloud Armor Machine Learning. + - !ruby/object:Api::Type::NestedObject + name: 'adaptiveProtectionConfig' + description: | + Configuration options for Cloud Armor Adaptive Protection (CAAP). + properties: + - !ruby/object:Api::Type::NestedObject + name: 'layer7DdosDefenseConfig' + description: | + Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enable' + description: | + If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::Enum + name: 'ruleVisibility' + description: | + Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + values: + - :PREMIUM + - :STANDARD + - !ruby/object:Api::Type::Array + name: 'thresholdConfigs' + description: | + Configuration options for layer7 adaptive protection for various customizable thresholds. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name must be 1-63 characters long, and comply with RFC1035. 
The name must be unique within the security policy. + - !ruby/object:Api::Type::Integer + name: 'autoDeployLoadThreshold' + description: | + + - !ruby/object:Api::Type::Integer + name: 'autoDeployConfidenceThreshold' + description: | + + - !ruby/object:Api::Type::Integer + name: 'autoDeployImpactedBaselineThreshold' + description: | + + - !ruby/object:Api::Type::Integer + name: 'autoDeployExpirationSec' + description: | + + - !ruby/object:Api::Type::Integer + name: 'detectionLoadThreshold' + description: | + + - !ruby/object:Api::Type::Integer + name: 'detectionAbsoluteQps' + description: | + + - !ruby/object:Api::Type::Integer + name: 'detectionRelativeToBaselineQps' + description: | + + - !ruby/object:Api::Type::Array + name: 'trafficGranularityConfigs' + description: | + Configuration options for enabling Adaptive Protection to operate on specified granular traffic units. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type of this configuration. + values: + - :HTTP_HEADER_HOST + - :HTTP_PATH + - :UNSPECIFIED_TYPE + - !ruby/object:Api::Type::String + name: 'value' + description: | + Requests that match this value constitute a granular traffic unit. + - !ruby/object:Api::Type::Boolean + name: 'enableEachUniqueValue' + description: | + If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if `value` is empty. + - !ruby/object:Api::Type::NestedObject + name: 'autoDeployConfig' + description: | + Configuration options for Adaptive Protection auto-deploy feature. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'loadThreshold' + description: | + + - !ruby/object:Api::Type::Integer + name: 'confidenceThreshold' + description: | + + - !ruby/object:Api::Type::Integer + name: 'impactedBaselineThreshold' + description: | + + - !ruby/object:Api::Type::Integer + name: 'expirationSec' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'ddosProtectionConfig' + description: | + + properties: + - !ruby/object:Api::Type::Enum + name: 'ddosProtection' + description: | + + values: + - :ADVANCED + - :ADVANCED_PREVIEW + - :STANDARD + - !ruby/object:Api::Type::NestedObject + name: 'advancedOptionsConfig' + description: | + + properties: + - !ruby/object:Api::Type::Enum + name: 'jsonParsing' + description: | + + values: + - :DISABLED + - :STANDARD + - :STANDARD_WITH_GRAPHQL + - !ruby/object:Api::Type::NestedObject + name: 'jsonCustomConfig' + description: | + + properties: + - !ruby/object:Api::Type::Array + name: 'contentTypes' + description: | + A list of custom Content-Type header values to apply the JSON parsing. As per RFC 1341, a Content-Type header value has the following format: Content-Type := type "/" subtype *[";" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'logLevel' + description: | + + values: + - :NORMAL + - :VERBOSE + - !ruby/object:Api::Type::Array + name: 'userIpRequestHeaders' + description: | + An optional list of case-insensitive request header names to use for resolving the callers client IP address. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'recaptchaOptionsConfig' + description: | + + properties: + - !ruby/object:Api::Type::String + name: 'redirectSiteKey' + description: | + An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the security policy. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL for this resource with the resource id. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. 
- CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. + values: + - :CLOUD_ARMOR + - :CLOUD_ARMOR_EDGE + - :CLOUD_ARMOR_INTERNAL_SERVICE + - :CLOUD_ARMOR_NETWORK + - :FIREWALL + - !ruby/object:Api::Type::Array + name: 'associations' + description: | + A list of associations that belong to this policy. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name for an association. + - !ruby/object:Api::Type::String + name: 'attachmentId' + description: | + The resource that the security policy is attached to. + - !ruby/object:Api::Type::String + name: 'securityPolicyId' + description: | + [Output Only] The security policy ID of the association. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + [Output Only] The display name of the security policy of the association. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'labelFingerprint' + description: | + A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy. + - !ruby/object:Api::Type::Integer + name: 'ruleTupleCount' + description: | + [Output Only] Total count of all security policy rule tuples. A security policy can not exceed a set number of tuples. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is FIREWALL. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'parent' + description: | + [Output Only] The parent of the security policy. + - !ruby/object:Api::Type::String + name: 'region' + description: | + [Output Only] URL of the region where the regional security policy resides. This field is not applicable to global security policies. 
+ + + + + - !ruby/object:Api::Resource + name: PublicAdvertisedPrefix + base_url: 'projects/{{project}}/global/publicAdvertisedPrefixes' + self_link: 'projects/{{project}}/global/publicAdvertisedPrefixes/{{publicAdvertisedPrefix}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/compute/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A public advertised prefix represents an aggregated IP prefix or netblock which customers bring to cloud. The IP prefix is a single unit of route advertisement and is announced globally to the internet. + properties: + + - !ruby/object:Api::Type::String + name: 'kind' + description: | + [Output Only] Type of the resource. Always compute#publicAdvertisedPrefix for public advertised prefixes. + - !ruby/object:Api::Type::String + name: 'id' + description: | + [Output Only] The unique identifier for the resource type. The server generates this identifier. + - !ruby/object:Api::Type::String + name: 'creationTimestamp' + description: | + [Output Only] Creation timestamp in RFC3339 text format. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::String + name: 'selfLink' + description: | + [Output Only] Server-defined URL for the resource. + - !ruby/object:Api::Type::String + name: 'selfLinkWithId' + description: | + [Output Only] Server-defined URL with id for the resource. + - !ruby/object:Api::Type::String + name: 'ipCidrRange' + description: | + The address range, in CIDR format, represented by this public advertised prefix. + - !ruby/object:Api::Type::String + name: 'dnsVerificationIp' + description: | + The address to be used for reverse DNS verification. + - !ruby/object:Api::Type::String + name: 'sharedSecret' + description: | + [Output Only] The shared secret to be used for reverse DNS verification. + - !ruby/object:Api::Type::Enum + name: 'status' + description: | + The status of the public advertised prefix. Possible values include: - `INITIAL`: RPKI validation is complete. - `PTR_CONFIGURED`: User has configured the PTR. - `VALIDATED`: Reverse DNS lookup is successful. - `REVERSE_DNS_LOOKUP_FAILED`: Reverse DNS lookup failed. - `PREFIX_CONFIGURATION_IN_PROGRESS`: The prefix is being configured. - `PREFIX_CONFIGURATION_COMPLETE`: The prefix is fully configured. - `PREFIX_REMOVAL_IN_PROGRESS`: The prefix is being removed. 
+ values: + - :ANNOUNCED_TO_INTERNET + - :INITIAL + - :PREFIX_CONFIGURATION_COMPLETE + - :PREFIX_CONFIGURATION_IN_PROGRESS + - :PREFIX_REMOVAL_IN_PROGRESS + - :PTR_CONFIGURED + - :READY_TO_ANNOUNCE + - :REVERSE_DNS_LOOKUP_FAILED + - :VALIDATED + - !ruby/object:Api::Type::Enum + name: 'pdpScope' + description: | + Specifies how child public delegated prefix will be scoped. It could be one of following values: - `REGIONAL`: The public delegated prefix is regional only. The provisioning will take a few minutes. - `GLOBAL`: The public delegated prefix is global only. The provisioning will take ~4 weeks. - `GLOBAL_AND_REGIONAL` [output only]: The public delegated prefixes is BYOIP V1 legacy prefix. This is output only value and no longer supported in BYOIP V2. + values: + - :GLOBAL + - :GLOBAL_AND_REGIONAL + - :REGIONAL + - !ruby/object:Api::Type::Array + name: 'publicDelegatedPrefixs' + description: | + [Output Only] The list of public delegated prefixes that exist for this public advertised prefix. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the public delegated prefix + - !ruby/object:Api::Type::String + name: 'region' + description: | + The region of the public delegated prefix if it is regional. If absent, the prefix is global. + - !ruby/object:Api::Type::String + name: 'project' + description: | + The project number of the public delegated prefix + - !ruby/object:Api::Type::String + name: 'status' + description: | + The status of the public delegated prefix. Possible values are: INITIALIZING: The public delegated prefix is being initialized and addresses cannot be created yet. ANNOUNCED: The public delegated prefix is active. + - !ruby/object:Api::Type::String + name: 'ipRange' + description: | + The IP address range of the public delegated prefix + - !ruby/object:Api::Type::String + name: 'fingerprint' + description: | + Fingerprint of this resource. 
A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a new PublicAdvertisedPrefix. An up-to-date fingerprint must be provided in order to update the PublicAdvertisedPrefix, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a PublicAdvertisedPrefix. + - !ruby/object:Api::Type::Enum + name: 'byoipApiVersion' + description: | + [Output Only] The version of BYOIP API. + values: + - :V1 + - :V2 + diff --git a/mmv1/products/compute/inspec.yaml b/mmv1/products/compute/inspec.yaml index bc7caad13..c63dc0f02 100644 --- a/mmv1/products/compute/inspec.yaml +++ b/mmv1/products/compute/inspec.yaml @@ -67,6 +67,8 @@ overrides: !ruby/object:Overrides::ResourceOverrides exclude: false status: !ruby/object:Overrides::Inspec::PropertyOverride exclude: false + ImageFamilyView: !ruby/object:Overrides::Inspec::ResourceOverride + singular_only: true Instance: !ruby/object:Overrides::Inspec::ResourceOverride additional_functions: third_party/inspec/custom_functions/google_compute_instance.erb singular_extra_examples: third_party/inspec/documentation/google_compute_instance.md @@ -85,18 +87,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides override_name: "instance_group_id" name: !ruby/object:Overrides::Inspec::PropertyOverride override_name: "instance_group_name" - InstanceGroupNamedPort: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - InterconnectAttachment: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - License: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - MachineType: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - MachineImage: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - ManagedSslCertificate: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true Network: !ruby/object:Overrides::Inspec::ResourceOverride 
additional_functions: third_party/inspec/custom_functions/google_compute_network.erb singular_extra_examples: third_party/inspec/documentation/google_compute_network.md @@ -113,7 +103,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides GlobalNetworkEndpoint: !ruby/object:Overrides::Inspec::ResourceOverride exclude: true GlobalNetworkEndpointGroup: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true + exclude: false NetworkPeeringRoutesConfig: !ruby/object:Overrides::Inspec::ResourceOverride exclude: true NodeGroup: !ruby/object:Overrides::Inspec::ResourceOverride @@ -129,7 +119,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides OrganizationSecurityPolicyRule: !ruby/object:Overrides::Inspec::ResourceOverride exclude: true PacketMirroring: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true + exclude: false ProjectInfo: !ruby/object:Overrides::Inspec::ResourceOverride singular_only: true singular_extra_examples: third_party/inspec/documentation/google_compute_project_info.md @@ -143,8 +133,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides override_name: "region_name" status: !ruby/object:Overrides::Inspec::PropertyOverride override_name: "region_status" - RegionAutoscaler: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true RegionDisk: !ruby/object:Overrides::Inspec::ResourceOverride exclude: true RegionDiskType: !ruby/object:Overrides::Inspec::ResourceOverride @@ -155,7 +143,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides singular_extra_examples: third_party/inspec/documentation/google_compute_region_instance_group_manager.md plural_extra_examples: third_party/inspec/documentation/google_compute_region_instance_group_managers.md additional_functions: third_party/inspec/custom_functions/google_compute_zone_operation.erb - additional_functions: third_party/inspec/custom_functions/google_compute_region_instance_group.erb properties: id: !ruby/object:Overrides::Inspec::PropertyOverride override_name: 
"instance_group_manager_id" @@ -163,20 +150,10 @@ overrides: !ruby/object:Overrides::ResourceOverrides override_name: "instance_group_manager_name" RegionSslCertificate: !ruby/object:Overrides::Inspec::ResourceOverride exclude: true - RegionTargetHttpProxy: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - RegionTargetHttpsProxy: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - RegionUrlMap: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true Reservation: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true - ResourcePolicy: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true + exclude: false RouterBgpPeer: !ruby/object:Overrides::Inspec::ResourceOverride exclude: true - ServiceAttachment: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true Subnetwork: !ruby/object:Overrides::Inspec::ResourceOverride additional_functions: third_party/inspec/custom_functions/google_compute_subnetwork.erb singular_extra_examples: third_party/inspec/documentation/google_compute_subnetwork.md @@ -204,8 +181,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides exclude: true pathMatchers.defaultRouteAction.weightedBackendServices: !ruby/object:Overrides::Inspec::PropertyOverride exclude: true - VpnGateway: !ruby/object:Overrides::Inspec::ResourceOverride - exclude: true VpnTunnel: !ruby/object:Overrides::Inspec::ResourceOverride singular_extra_examples: third_party/inspec/documentation/google_compute_vpn_tunnel.md plural_extra_examples: third_party/inspec/documentation/google_compute_vpn_tunnels.md diff --git a/mmv1/products/dataflow/api.yaml b/mmv1/products/dataflow/api.yaml new file mode 100644 index 000000000..f28584c5b --- /dev/null +++ b/mmv1/products/dataflow/api.yaml @@ -0,0 +1,2174 @@ + +--- !ruby/object:Api::Product +name: dataflow +display_name: dataflow +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://dataflow.googleapis.com//v1b3/ +scopes: + - 
https://dataflow.googleapis.com//auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://dataflow.googleapis.com/ + url: https://console.cloud.google.com/apis/library/dataflow.googleapis.com/ +objects: + + - !ruby/object:Api::Resource + name: ProjectLocationJob + base_url: 'projects/{{projectId}}/locations/{{location}}/jobs' + self_link: 'projects/{{projectId}}/locations/{{location}}/jobs/{{jobId}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataflow/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Defines a job to be run by the Cloud Dataflow service. Do not enter confidential information when you supply string values using the API. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + The unique ID of this job. This field is set by the Dataflow service when the job is created, and is immutable for the life of the job. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + The ID of the Google Cloud project that the job belongs to. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. 
The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of Dataflow job. + values: + - :JOB_TYPE_UNKNOWN + - :JOB_TYPE_BATCH + - :JOB_TYPE_STREAMING + - !ruby/object:Api::Type::NestedObject + name: 'environment' + description: | + Describes the environment in which a Dataflow Job runs. + properties: + - !ruby/object:Api::Type::String + name: 'tempStoragePrefix' + description: | + The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'clusterManagerApiService' + description: | + The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". + - !ruby/object:Api::Type::Array + name: 'experiments' + description: | + The list of experiments to enable. This field should be used for SDK related experiments and not for service related experiments. The proper field for service related experiments is service_options. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'serviceOptions' + description: | + The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'serviceKmsKeyName' + description: | + If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY + - !ruby/object:Api::Type::Array + name: 'workerPools' + description: | + The worker pools. At least one "harness" worker pool must be specified in order for the job to have workers. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + The kind of the worker pool; currently only `harness` and `shuffle` are supported. + - !ruby/object:Api::Type::Integer + name: 'numWorkers' + description: | + Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::Array + name: 'packages' + description: | + Packages to be installed on workers. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the package. + - !ruby/object:Api::Type::String + name: 'location' + description: | + The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/ + - !ruby/object:Api::Type::Enum + name: 'defaultPackageSet' + description: | + The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language. + values: + - :DEFAULT_PACKAGE_SET_UNKNOWN + - :DEFAULT_PACKAGE_SET_NONE + - :DEFAULT_PACKAGE_SET_JAVA + - :DEFAULT_PACKAGE_SET_PYTHON + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Machine type (e.g. "n1-standard-1"). 
If empty or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::Enum + name: 'teardownPolicy' + description: | + Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default. + values: + - :TEARDOWN_POLICY_UNKNOWN + - :TEARDOWN_ALWAYS + - :TEARDOWN_ON_SUCCESS + - :TEARDOWN_NEVER + - !ruby/object:Api::Type::Integer + name: 'diskSizeGb' + description: | + Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::String + name: 'diskSourceImage' + description: | + Fully qualified source image for disks. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::NestedObject + name: 'taskrunnerSettings' + description: | + Taskrunner configuration settings. + properties: + - !ruby/object:Api::Type::String + name: 'taskUser' + description: | + The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root". 
+ - !ruby/object:Api::Type::String + name: 'taskGroup' + description: | + The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel". + - !ruby/object:Api::Type::Array + name: 'oauthScopes' + description: | + The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'baseUrl' + description: | + The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/" + - !ruby/object:Api::Type::String + name: 'dataflowApiVersion' + description: | + The API version of endpoint, e.g. "v1b3" + - !ruby/object:Api::Type::NestedObject + name: 'parallelWorkerSettings' + description: | + Provides data to pass through to the worker harness. + properties: + - !ruby/object:Api::Type::String + name: 'baseUrl' + description: | + The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/" + - !ruby/object:Api::Type::Boolean + name: 'reportingEnabled' + description: | + Whether to send work progress updates to the service. + - !ruby/object:Api::Type::String + name: 'servicePath' + description: | + The Cloud Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects". 
+ - !ruby/object:Api::Type::String + name: 'shuffleServicePath' + description: | + The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1". + - !ruby/object:Api::Type::String + name: 'workerId' + description: | + The ID of the worker running this pipeline. + - !ruby/object:Api::Type::String + name: 'tempStoragePrefix' + description: | + The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'baseTaskDir' + description: | + The location on the worker for task-specific subdirectories. + - !ruby/object:Api::Type::Boolean + name: 'continueOnException' + description: | + Whether to continue taskrunner if an exception is hit. + - !ruby/object:Api::Type::Boolean + name: 'logToSerialconsole' + description: | + Whether to send taskrunner log info to Google Compute Engine VM serial console. + - !ruby/object:Api::Type::Boolean + name: 'alsologtostderr' + description: | + Whether to also send taskrunner log info to stderr. + - !ruby/object:Api::Type::String + name: 'logUploadLocation' + description: | + Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'logDir' + description: | + The directory on the VM to store logs. + - !ruby/object:Api::Type::String + name: 'tempStoragePrefix' + description: | + The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'harnessCommand' + description: | + The command to launch the worker harness. 
+ - !ruby/object:Api::Type::String + name: 'workflowFileName' + description: | + The file to store the workflow in. + - !ruby/object:Api::Type::String + name: 'commandlinesFileName' + description: | + The file to store preprocessing commands in. + - !ruby/object:Api::Type::String + name: 'vmId' + description: | + The ID string of the VM. + - !ruby/object:Api::Type::String + name: 'languageHint' + description: | + The suggested backend language. + - !ruby/object:Api::Type::String + name: 'streamingWorkerMainClass' + description: | + The streaming worker main class name. + - !ruby/object:Api::Type::String + name: 'onHostMaintenance' + description: | + The action to take on host maintenance, as defined by the Google Compute Engine API. + - !ruby/object:Api::Type::Array + name: 'dataDisks' + description: | + Data disks that are used by a VM in this workflow. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'sizeGb' + description: | + Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. 
Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard + - !ruby/object:Api::Type::String + name: 'mountPoint' + description: | + Directory in a VM where disk is mounted. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata to set on the Google Compute Engine VMs. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingSettings' + description: | + Settings for WorkerPool autoscaling. + properties: + - !ruby/object:Api::Type::Enum + name: 'algorithm' + description: | + The algorithm to use for autoscaling. + values: + - :AUTOSCALING_ALGORITHM_UNKNOWN + - :AUTOSCALING_ALGORITHM_NONE + - :AUTOSCALING_ALGORITHM_BASIC + - !ruby/object:Api::Type::Integer + name: 'maxNumWorkers' + description: | + The maximum number of workers to cap scaling at. + - !ruby/object:Api::Type::NestedObject + name: 'poolArgs' + description: | + Extra arguments for this worker pool. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. Contains field @type with type URL. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default". + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK". + - !ruby/object:Api::Type::String + name: 'workerHarnessContainerImage' + description: | + Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. 
Use sdk_harness_container_images instead. + - !ruby/object:Api::Type::Integer + name: 'numThreadsPerWorker' + description: | + The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming). + - !ruby/object:Api::Type::Enum + name: 'ipConfiguration' + description: | + Configuration for VM IPs. + values: + - :WORKER_IP_UNSPECIFIED + - :WORKER_IP_PUBLIC + - :WORKER_IP_PRIVATE + - !ruby/object:Api::Type::Array + name: 'sdkHarnessContainerImages' + description: | + Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'containerImage' + description: | + A docker container image that resides in Google Container Registry. + - !ruby/object:Api::Type::Boolean + name: 'useSingleCorePerContainer' + description: | + If true, recommends the Dataflow service to use only one core per SDK container instance with this image. If false (or unset) recommends using more than one core per SDK container instance with this image for efficiency. Note that Dataflow service may choose to override this property if needed. + - !ruby/object:Api::Type::String + name: 'environmentId' + description: | + Environment ID for the Beam runner API proto Environment that corresponds to the current SDK Harness. + - !ruby/object:Api::Type::Array + name: 'capabilities' + description: | + The set of capabilities enumerated in the above Environment proto. 
See also [beam_runner_api.proto](https://github.com/apache/beam/blob/master/model/pipeline/src/main/proto/org/apache/beam/model/pipeline/v1/beam_runner_api.proto) + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'userAgent' + description: | + A description of the process that generated the request. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::NestedObject + name: 'version' + description: | + A structure describing which components and their versions of the service are required in order to run the job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'dataset' + description: | + The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} + - !ruby/object:Api::Type::NestedObject + name: 'sdkPipelineOptions' + description: | + The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::NestedObject + name: 'internalExperiments' + description: | + Experimental settings. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. Contains field @type with type URL. + - !ruby/object:Api::Type::String + name: 'serviceAccountEmail' + description: | + Identity to run virtual machines as. Defaults to the default account. 
+ - !ruby/object:Api::Type::Enum + name: 'flexResourceSchedulingGoal' + description: | + Which Flexible Resource Scheduling mode to run in. + values: + - :FLEXRS_UNSPECIFIED + - :FLEXRS_SPEED_OPTIMIZED + - :FLEXRS_COST_OPTIMIZED + - !ruby/object:Api::Type::String + name: 'workerRegion' + description: | + The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region. + - !ruby/object:Api::Type::String + name: 'workerZone' + description: | + The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. + - !ruby/object:Api::Type::Enum + name: 'shuffleMode' + description: | + Output only. The shuffle mode used for the job. + values: + - :SHUFFLE_MODE_UNSPECIFIED + - :VM_BASED + - :SERVICE_BASED + - !ruby/object:Api::Type::NestedObject + name: 'debugOptions' + description: | + Describes any options that have an effect on the debugging of pipelines. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableHotKeyLogging' + description: | + When true, enables the logging of the literal hot key to the user's Cloud Logging. + - !ruby/object:Api::Type::NestedObject + name: 'dataSampling' + description: | + Configuration options for sampling elements. + properties: + - !ruby/object:Api::Type::Array + name: 'behaviors' + description: | + List of given sampling behaviors to enable. For example, specifying behaviors = [ALWAYS_ON] samples in-flight elements but does not sample exceptions. 
Can be used to specify multiple behaviors like, behaviors = [ALWAYS_ON, EXCEPTIONS] for specifying periodic sampling and exception sampling. If DISABLED is in the list, then sampling will be disabled and ignore the other given behaviors. Ordering does not matter. + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'useStreamingEngineResourceBasedBilling' + description: | + Output only. Whether the job uses the Streaming Engine resource-based billing model. + - !ruby/object:Api::Type::Enum + name: 'streamingMode' + description: | + Optional. Specifies the Streaming Engine message processing guarantees. Reduces cost and latency but might result in duplicate messages committed to storage. Designed to run simple mapping streaming ETL jobs at the lowest cost. For example, Change Data Capture (CDC) to BigQuery is a canonical use case. + values: + - :STREAMING_MODE_UNSPECIFIED + - :STREAMING_MODE_EXACTLY_ONCE + - :STREAMING_MODE_AT_LEAST_ONCE + - !ruby/object:Api::Type::Array + name: 'steps' + description: | + Exactly one of step or steps_location should be specified. The top-level steps that constitute the entire job. Only retrieved with JOB_VIEW_ALL. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + The kind of step in the Cloud Dataflow job. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name that identifies the step. This must be unique for each step with respect to all other steps in the Cloud Dataflow job. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Named properties associated with the step. Each kind of predefined step has its own required set of properties. Must be provided on Create. Only retrieved with JOB_VIEW_ALL. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. 
+ - !ruby/object:Api::Type::String + name: 'stepsLocation' + description: | + The Cloud Storage location where the steps are stored. + - !ruby/object:Api::Type::Enum + name: 'currentState' + description: | + The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. + values: + - :JOB_STATE_UNKNOWN + - :JOB_STATE_STOPPED + - :JOB_STATE_RUNNING + - :JOB_STATE_DONE + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLED + - :JOB_STATE_UPDATED + - :JOB_STATE_DRAINING + - :JOB_STATE_DRAINED + - :JOB_STATE_PENDING + - :JOB_STATE_CANCELLING + - :JOB_STATE_QUEUED + - :JOB_STATE_RESOURCE_CLEANING_UP + - !ruby/object:Api::Type::String + name: 'currentStateTime' + description: | + The timestamp associated with the current state. + - !ruby/object:Api::Type::Enum + name: 'requestedState' + description: | + The job's requested state. Applies to `UpdateJob` requests. Set `requested_state` with `UpdateJob` requests to switch between the states `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING`. You can also use `UpdateJob` requests to change a job's state from `JOB_STATE_RUNNING` to `JOB_STATE_CANCELLED`, `JOB_STATE_DONE`, or `JOB_STATE_DRAINED`. These states irrevocably terminate the job if it hasn't already reached a terminal state. This field has no effect on `CreateJob` requests. 
+ values: + - :JOB_STATE_UNKNOWN + - :JOB_STATE_STOPPED + - :JOB_STATE_RUNNING + - :JOB_STATE_DONE + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLED + - :JOB_STATE_UPDATED + - :JOB_STATE_DRAINING + - :JOB_STATE_DRAINED + - :JOB_STATE_PENDING + - :JOB_STATE_CANCELLING + - :JOB_STATE_QUEUED + - :JOB_STATE_RESOURCE_CLEANING_UP + - !ruby/object:Api::Type::NestedObject + name: 'executionInfo' + description: | + Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'stages' + description: | + A mapping from each stage to the information about that stage. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Contains information about how a particular google.dataflow.v1beta3.Step will be executed. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service. + - !ruby/object:Api::Type::String + name: 'replaceJobId' + description: | + If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job. + - !ruby/object:Api::Type::NestedObject + name: 'transformNameMapping' + description: | + The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'clientRequestId' + description: | + The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. 
The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it. + - !ruby/object:Api::Type::String + name: 'replacedByJobId' + description: | + If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job. + - !ruby/object:Api::Type::Array + name: 'tempFiles' + description: | + A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'location' + description: | + The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. + - !ruby/object:Api::Type::NestedObject + name: 'pipelineDescription' + description: | + A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'originalPipelineTransform' + description: | + Description of each transform in the pipeline and collections between them. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'kind' + description: | + Type of transform. + values: + - :UNKNOWN_KIND + - :PAR_DO_KIND + - :GROUP_BY_KEY_KIND + - :FLATTEN_KIND + - :READ_KIND + - :WRITE_KIND + - :CONSTANT_KIND + - :SINGLETON_KIND + - :SHUFFLE_KIND + - !ruby/object:Api::Type::String + name: 'id' + description: | + SDK generated id of this transform instance. + - !ruby/object:Api::Type::String + name: 'name' + description: | + User provided name for this transform instance. + - !ruby/object:Api::Type::Array + name: 'displayData' + description: | + Transform-specific display data. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system. + - !ruby/object:Api::Type::String + name: 'namespace' + description: | + The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering. + - !ruby/object:Api::Type::String + name: 'strValue' + description: | + Contains value if the data is of string type. + - !ruby/object:Api::Type::String + name: 'int64Value' + description: | + Contains value if the data is of int64 type. + - !ruby/object:Api::Type::Integer + name: 'floatValue' + description: | + Contains value if the data is of float type. + - !ruby/object:Api::Type::String + name: 'javaClassValue' + description: | + Contains value if the data is of java class type. 
+ - !ruby/object:Api::Type::String + name: 'timestampValue' + description: | + Contains value if the data is of timestamp type. + - !ruby/object:Api::Type::String + name: 'durationValue' + description: | + Contains value if the data is of duration type. + - !ruby/object:Api::Type::Boolean + name: 'boolValue' + description: | + Contains value if the data is of a boolean type. + - !ruby/object:Api::Type::String + name: 'shortStrValue' + description: | + A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip. + - !ruby/object:Api::Type::String + name: 'url' + description: | + An optional full URL. + - !ruby/object:Api::Type::String + name: 'label' + description: | + An optional label to display in a dax UI for the element. + - !ruby/object:Api::Type::Array + name: 'outputCollectionName' + description: | + User names for all collection outputs to this transform. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'inputCollectionName' + description: | + User names for all collection inputs to this transform. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'executionPipelineStage' + description: | + Description of each stage of execution of the pipeline. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this stage. + - !ruby/object:Api::Type::String + name: 'id' + description: | + Dataflow service generated id for this stage. + - !ruby/object:Api::Type::Enum + name: 'kind' + description: | + Type of transform this stage is executing. 
+ values: + - :UNKNOWN_KIND + - :PAR_DO_KIND + - :GROUP_BY_KEY_KIND + - :FLATTEN_KIND + - :READ_KIND + - :WRITE_KIND + - :CONSTANT_KIND + - :SINGLETON_KIND + - :SHUFFLE_KIND + - !ruby/object:Api::Type::Array + name: 'inputSource' + description: | + Input sources for this stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this source; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransformOrCollection' + description: | + User name for the original user transform or collection with which this source is most closely associated. + - !ruby/object:Api::Type::String + name: 'sizeBytes' + description: | + Size of the source, if measurable. + - !ruby/object:Api::Type::Array + name: 'outputSource' + description: | + Output sources for this stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this source; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransformOrCollection' + description: | + User name for the original user transform or collection with which this source is most closely associated. + - !ruby/object:Api::Type::String + name: 'sizeBytes' + description: | + Size of the source, if measurable. + - !ruby/object:Api::Type::Array + name: 'prerequisiteStage' + description: | + Other stages that must complete before this stage can run. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'componentTransform' + description: | + Transforms that comprise this execution stage. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this transform; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransform' + description: | + User name for the original user transform with which this transform is most closely associated. + - !ruby/object:Api::Type::Array + name: 'componentSource' + description: | + Collections produced and consumed by component transforms of this stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this transform; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransformOrCollection' + description: | + User name for the original user transform or collection with which this source is most closely associated. + - !ruby/object:Api::Type::Array + name: 'displayData' + description: | + Pipeline level display data. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system. + - !ruby/object:Api::Type::String + name: 'namespace' + description: | + The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering. + - !ruby/object:Api::Type::String + name: 'strValue' + description: | + Contains value if the data is of string type. 
+ - !ruby/object:Api::Type::String + name: 'int64Value' + description: | + Contains value if the data is of int64 type. + - !ruby/object:Api::Type::Double + name: 'floatValue' + description: | + Contains value if the data is of float type. + - !ruby/object:Api::Type::String + name: 'javaClassValue' + description: | + Contains value if the data is of java class type. + - !ruby/object:Api::Type::String + name: 'timestampValue' + description: | + Contains value if the data is of timestamp type. + - !ruby/object:Api::Type::String + name: 'durationValue' + description: | + Contains value if the data is of duration type. + - !ruby/object:Api::Type::Boolean + name: 'boolValue' + description: | + Contains value if the data is of a boolean type. + - !ruby/object:Api::Type::String + name: 'shortStrValue' + description: | + A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip. + - !ruby/object:Api::Type::String + name: 'url' + description: | + An optional full URL. + - !ruby/object:Api::Type::String + name: 'label' + description: | + An optional label to display in a dax UI for the element. + - !ruby/object:Api::Type::String + name: 'stepNamesHash' + description: | + A hash value of the submitted pipeline portable graph step names if exists. + - !ruby/object:Api::Type::Array + name: 'stageStates' + description: | + This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'executionStageName' + description: | + The name of the execution stage. + - !ruby/object:Api::Type::Enum + name: 'executionStageState' + description: | + Executions stage states allow the same set of values as JobState. 
+ values: + - :JOB_STATE_UNKNOWN + - :JOB_STATE_STOPPED + - :JOB_STATE_RUNNING + - :JOB_STATE_DONE + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLED + - :JOB_STATE_UPDATED + - :JOB_STATE_DRAINING + - :JOB_STATE_DRAINED + - :JOB_STATE_PENDING + - :JOB_STATE_CANCELLING + - :JOB_STATE_QUEUED + - :JOB_STATE_RESOURCE_CLEANING_UP + - !ruby/object:Api::Type::String + name: 'currentStateTime' + description: | + The time at which the stage transitioned to this state. + - !ruby/object:Api::Type::NestedObject + name: 'jobMetadata' + description: | + Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'sdkVersion' + description: | + The version of the SDK used to run the job. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + The version of the SDK used to run the job. + - !ruby/object:Api::Type::String + name: 'versionDisplayName' + description: | + A readable string describing the version of the SDK. + - !ruby/object:Api::Type::Enum + name: 'sdkSupportStatus' + description: | + The support status for this SDK version. + values: + - :UNKNOWN + - :SUPPORTED + - :STALE + - :DEPRECATED + - :UNSUPPORTED + - !ruby/object:Api::Type::Array + name: 'bugs' + description: | + Output only. Known bugs found in this SDK version. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Output only. Describes the impact of this SDK bug. + values: + - :TYPE_UNSPECIFIED + - :GENERAL + - :PERFORMANCE + - :DATALOSS + - !ruby/object:Api::Type::Enum + name: 'severity' + description: | + Output only. How severe the SDK bug is. + values: + - :SEVERITY_UNSPECIFIED + - :NOTICE + - :WARNING + - :SEVERE + - !ruby/object:Api::Type::String + name: 'uri' + description: | + Output only. Link to more information on the bug. 
+ - !ruby/object:Api::Type::Array + name: 'spannerDetails' + description: | + Identification of a Spanner source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + ProjectId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + InstanceId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'databaseId' + description: | + DatabaseId accessed in the connection. + - !ruby/object:Api::Type::Array + name: 'bigqueryDetails' + description: | + Identification of a BigQuery source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'table' + description: | + Table accessed in the connection. + - !ruby/object:Api::Type::String + name: 'dataset' + description: | + Dataset accessed in the connection. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + Project accessed in the connection. + - !ruby/object:Api::Type::String + name: 'query' + description: | + Query used to access data in the connection. + - !ruby/object:Api::Type::Array + name: 'bigTableDetails' + description: | + Identification of a Cloud Bigtable source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + ProjectId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + InstanceId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'tableId' + description: | + TableId accessed in the connection. + - !ruby/object:Api::Type::Array + name: 'pubsubDetails' + description: | + Identification of a Pub/Sub source used in the Dataflow job. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'topic' + description: | + Topic accessed in the connection. + - !ruby/object:Api::Type::String + name: 'subscription' + description: | + Subscription used in the connection. + - !ruby/object:Api::Type::Array + name: 'fileDetails' + description: | + Identification of a File source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'filePattern' + description: | + File Pattern used to access files by the connector. + - !ruby/object:Api::Type::Array + name: 'datastoreDetails' + description: | + Identification of a Datastore source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'namespace' + description: | + Namespace used in the connection. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + ProjectId accessed in the connection. + - !ruby/object:Api::Type::NestedObject + name: 'userDisplayProperties' + description: | + List of display properties to help UI filter jobs. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + The timestamp when the job was started (transitioned to JOB_STATE_PENDING). Flexible resource scheduling jobs are started with some delay after job creation, so start_time is unset before start and is updated when the job is started by the Cloud Dataflow service. For other jobs, start_time always equals to create_time and is immutable and set by the Cloud Dataflow service. + - !ruby/object:Api::Type::String + name: 'createdFromSnapshotId' + description: | + If this is specified, the job's initial state is populated from the given snapshot. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Reserved for future use. 
This field is set only in responses from the server; it is ignored if it is set in any requests. + - !ruby/object:Api::Type::NestedObject + name: 'runtimeUpdatableParams' + description: | + Additional job parameters that can only be updated during runtime using the projects.jobs.update method. These fields have no effect when specified during job creation. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxNumWorkers' + description: | + The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. + - !ruby/object:Api::Type::Integer + name: 'minNumWorkers' + description: | + The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. + - !ruby/object:Api::Type::Double + name: 'workerUtilizationHint' + description: | + Target worker utilization, compared against the aggregate utilization of the worker pool by autoscaler, to determine upscaling and downscaling when absent other constraints such as backlog. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzi' + description: | + Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. 
+ + +--- !ruby/object:Api::Product +name: dataflow +display_name: dataflow +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://dataflow.googleapis.com/v1b3/ +scopes: + - https://www.googleapis.com/auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://dataflow.googleapis.com/ + url: https://console.cloud.google.com/apis/library/dataflow.googleapis.com/ +objects: + + - !ruby/object:Api::Resource + name: ProjectLocationJob + base_url: 'projects/{{projectId}}/locations/{{location}}/jobs' + self_link: 'projects/{{projectId}}/locations/{{location}}/jobs/{{jobId}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataflow/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Defines a job to be run by the Cloud Dataflow service. Do not enter confidential information when you supply string values using the API. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + The unique ID of this job. This field is set by the Dataflow service when the job is created, and is immutable for the life of the job. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + The ID of the Google Cloud project that the job belongs to. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. 
If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The type of Dataflow job. + values: + - :JOB_TYPE_UNKNOWN + - :JOB_TYPE_BATCH + - :JOB_TYPE_STREAMING + - !ruby/object:Api::Type::NestedObject + name: 'environment' + description: | + Describes the environment in which a Dataflow Job runs. + properties: + - !ruby/object:Api::Type::String + name: 'tempStoragePrefix' + description: | + The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'clusterManagerApiService' + description: | + The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". + - !ruby/object:Api::Type::Array + name: 'experiments' + description: | + The list of experiments to enable. This field should be used for SDK related experiments and not for service related experiments. The proper field for service related experiments is service_options. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'serviceOptions' + description: | + The list of service options to enable. This field should be used for service related experiments only. 
These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'serviceKmsKeyName' + description: | + If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY + - !ruby/object:Api::Type::Array + name: 'workerPools' + description: | + The worker pools. At least one "harness" worker pool must be specified in order for the job to have workers. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + The kind of the worker pool; currently only `harness` and `shuffle` are supported. + - !ruby/object:Api::Type::Integer + name: 'numWorkers' + description: | + Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::Array + name: 'packages' + description: | + Packages to be installed on workers. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the package. + - !ruby/object:Api::Type::String + name: 'location' + description: | + The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/ + - !ruby/object:Api::Type::Enum + name: 'defaultPackageSet' + description: | + The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language. 
+ values: + - :DEFAULT_PACKAGE_SET_UNKNOWN + - :DEFAULT_PACKAGE_SET_NONE + - :DEFAULT_PACKAGE_SET_JAVA + - :DEFAULT_PACKAGE_SET_PYTHON + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::Enum + name: 'teardownPolicy' + description: | + Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default. + values: + - :TEARDOWN_POLICY_UNKNOWN + - :TEARDOWN_ALWAYS + - :TEARDOWN_ON_SUCCESS + - :TEARDOWN_NEVER + - !ruby/object:Api::Type::Integer + name: 'diskSizeGb' + description: | + Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::String + name: 'diskSourceImage' + description: | + Fully qualified source image for disks. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default. 
+ - !ruby/object:Api::Type::NestedObject + name: 'taskrunnerSettings' + description: | + Taskrunner configuration settings. + properties: + - !ruby/object:Api::Type::String + name: 'taskUser' + description: | + The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root". + - !ruby/object:Api::Type::String + name: 'taskGroup' + description: | + The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel". + - !ruby/object:Api::Type::Array + name: 'oauthScopes' + description: | + The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'baseUrl' + description: | + The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/" + - !ruby/object:Api::Type::String + name: 'dataflowApiVersion' + description: | + The API version of endpoint, e.g. "v1b3" + - !ruby/object:Api::Type::NestedObject + name: 'parallelWorkerSettings' + description: | + Provides data to pass through to the worker harness. + properties: + - !ruby/object:Api::Type::String + name: 'baseUrl' + description: | + The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". 
If not specified, the default value is "http://www.googleapis.com/" + - !ruby/object:Api::Type::Boolean + name: 'reportingEnabled' + description: | + Whether to send work progress updates to the service. + - !ruby/object:Api::Type::String + name: 'servicePath' + description: | + The Cloud Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects". + - !ruby/object:Api::Type::String + name: 'shuffleServicePath' + description: | + The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1". + - !ruby/object:Api::Type::String + name: 'workerId' + description: | + The ID of the worker running this pipeline. + - !ruby/object:Api::Type::String + name: 'tempStoragePrefix' + description: | + The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'baseTaskDir' + description: | + The location on the worker for task-specific subdirectories. + - !ruby/object:Api::Type::Boolean + name: 'continueOnException' + description: | + Whether to continue taskrunner if an exception is hit. + - !ruby/object:Api::Type::Boolean + name: 'logToSerialconsole' + description: | + Whether to send taskrunner log info to Google Compute Engine VM serial console. + - !ruby/object:Api::Type::Boolean + name: 'alsologtostderr' + description: | + Whether to also send taskrunner log info to stderr. + - !ruby/object:Api::Type::String + name: 'logUploadLocation' + description: | + Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'logDir' + description: | + The directory on the VM to store logs. 
+ - !ruby/object:Api::Type::String + name: 'tempStoragePrefix' + description: | + The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + - !ruby/object:Api::Type::String + name: 'harnessCommand' + description: | + The command to launch the worker harness. + - !ruby/object:Api::Type::String + name: 'workflowFileName' + description: | + The file to store the workflow in. + - !ruby/object:Api::Type::String + name: 'commandlinesFileName' + description: | + The file to store preprocessing commands in. + - !ruby/object:Api::Type::String + name: 'vmId' + description: | + The ID string of the VM. + - !ruby/object:Api::Type::String + name: 'languageHint' + description: | + The suggested backend language. + - !ruby/object:Api::Type::String + name: 'streamingWorkerMainClass' + description: | + The streaming worker main class name. + - !ruby/object:Api::Type::String + name: 'onHostMaintenance' + description: | + The action to take on host maintenance, as defined by the Google Compute Engine API. + - !ruby/object:Api::Type::Array + name: 'dataDisks' + description: | + Data disks that are used by a VM in this workflow. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'sizeGb' + description: | + Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default. + - !ruby/object:Api::Type::String + name: 'diskType' + description: | + Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". 
If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard + - !ruby/object:Api::Type::String + name: 'mountPoint' + description: | + Directory in a VM where disk is mounted. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata to set on the Google Compute Engine VMs. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingSettings' + description: | + Settings for WorkerPool autoscaling. + properties: + - !ruby/object:Api::Type::Enum + name: 'algorithm' + description: | + The algorithm to use for autoscaling. + values: + - :AUTOSCALING_ALGORITHM_UNKNOWN + - :AUTOSCALING_ALGORITHM_NONE + - :AUTOSCALING_ALGORITHM_BASIC + - !ruby/object:Api::Type::Integer + name: 'maxNumWorkers' + description: | + The maximum number of workers to cap scaling at. + - !ruby/object:Api::Type::NestedObject + name: 'poolArgs' + description: | + Extra arguments for this worker pool. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. Contains field @type with type URL. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default". + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Subnetwork to which VMs will be assigned, if desired. 
Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK". + - !ruby/object:Api::Type::String + name: 'workerHarnessContainerImage' + description: | + Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead. + - !ruby/object:Api::Type::Integer + name: 'numThreadsPerWorker' + description: | + The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming). + - !ruby/object:Api::Type::Enum + name: 'ipConfiguration' + description: | + Configuration for VM IPs. + values: + - :WORKER_IP_UNSPECIFIED + - :WORKER_IP_PUBLIC + - :WORKER_IP_PRIVATE + - !ruby/object:Api::Type::Array + name: 'sdkHarnessContainerImages' + description: | + Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'containerImage' + description: | + A docker container image that resides in Google Container Registry. + - !ruby/object:Api::Type::Boolean + name: 'useSingleCorePerContainer' + description: | + If true, recommends the Dataflow service to use only one core per SDK container instance with this image. If false (or unset) recommends using more than one core per SDK container instance with this image for efficiency. Note that Dataflow service may choose to override this property if needed. + - !ruby/object:Api::Type::String + name: 'environmentId' + description: | + Environment ID for the Beam runner API proto Environment that corresponds to the current SDK Harness. 
+ - !ruby/object:Api::Type::Array + name: 'capabilities' + description: | + The set of capabilities enumerated in the above Environment proto. See also [beam_runner_api.proto](https://github.com/apache/beam/blob/master/model/pipeline/src/main/proto/org/apache/beam/model/pipeline/v1/beam_runner_api.proto) + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'userAgent' + description: | + A description of the process that generated the request. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::NestedObject + name: 'version' + description: | + A structure describing which components and their versions of the service are required in order to run the job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'dataset' + description: | + The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} + - !ruby/object:Api::Type::NestedObject + name: 'sdkPipelineOptions' + description: | + The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::NestedObject + name: 'internalExperiments' + description: | + Experimental settings. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. Contains field @type with type URL. + - !ruby/object:Api::Type::String + name: 'serviceAccountEmail' + description: | + Identity to run virtual machines as. 
Defaults to the default account. + - !ruby/object:Api::Type::Enum + name: 'flexResourceSchedulingGoal' + description: | + Which Flexible Resource Scheduling mode to run in. + values: + - :FLEXRS_UNSPECIFIED + - :FLEXRS_SPEED_OPTIMIZED + - :FLEXRS_COST_OPTIMIZED + - !ruby/object:Api::Type::String + name: 'workerRegion' + description: | + The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region. + - !ruby/object:Api::Type::String + name: 'workerZone' + description: | + The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. + - !ruby/object:Api::Type::Enum + name: 'shuffleMode' + description: | + Output only. The shuffle mode used for the job. + values: + - :SHUFFLE_MODE_UNSPECIFIED + - :VM_BASED + - :SERVICE_BASED + - !ruby/object:Api::Type::NestedObject + name: 'debugOptions' + description: | + Describes any options that have an effect on the debugging of pipelines. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableHotKeyLogging' + description: | + When true, enables the logging of the literal hot key to the user's Cloud Logging. + - !ruby/object:Api::Type::NestedObject + name: 'dataSampling' + description: | + Configuration options for sampling elements. + properties: + - !ruby/object:Api::Type::Array + name: 'behaviors' + description: | + List of given sampling behaviors to enable. For example, specifying behaviors = [ALWAYS_ON] samples in-flight elements but does not sample exceptions. 
Can be used to specify multiple behaviors like, behaviors = [ALWAYS_ON, EXCEPTIONS] for specifying periodic sampling and exception sampling. If DISABLED is in the list, then sampling will be disabled and ignore the other given behaviors. Ordering does not matter. + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'useStreamingEngineResourceBasedBilling' + description: | + Output only. Whether the job uses the Streaming Engine resource-based billing model. + - !ruby/object:Api::Type::Enum + name: 'streamingMode' + description: | + Optional. Specifies the Streaming Engine message processing guarantees. Reduces cost and latency but might result in duplicate messages committed to storage. Designed to run simple mapping streaming ETL jobs at the lowest cost. For example, Change Data Capture (CDC) to BigQuery is a canonical use case. + values: + - :STREAMING_MODE_UNSPECIFIED + - :STREAMING_MODE_EXACTLY_ONCE + - :STREAMING_MODE_AT_LEAST_ONCE + - !ruby/object:Api::Type::Array + name: 'steps' + description: | + Exactly one of step or steps_location should be specified. The top-level steps that constitute the entire job. Only retrieved with JOB_VIEW_ALL. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + The kind of step in the Cloud Dataflow job. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name that identifies the step. This must be unique for each step with respect to all other steps in the Cloud Dataflow job. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Named properties associated with the step. Each kind of predefined step has its own required set of properties. Must be provided on Create. Only retrieved with JOB_VIEW_ALL. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. 
+ - !ruby/object:Api::Type::String + name: 'stepsLocation' + description: | + The Cloud Storage location where the steps are stored. + - !ruby/object:Api::Type::Enum + name: 'currentState' + description: | + The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. + values: + - :JOB_STATE_UNKNOWN + - :JOB_STATE_STOPPED + - :JOB_STATE_RUNNING + - :JOB_STATE_DONE + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLED + - :JOB_STATE_UPDATED + - :JOB_STATE_DRAINING + - :JOB_STATE_DRAINED + - :JOB_STATE_PENDING + - :JOB_STATE_CANCELLING + - :JOB_STATE_QUEUED + - :JOB_STATE_RESOURCE_CLEANING_UP + - !ruby/object:Api::Type::String + name: 'currentStateTime' + description: | + The timestamp associated with the current state. + - !ruby/object:Api::Type::Enum + name: 'requestedState' + description: | + The job's requested state. Applies to `UpdateJob` requests. Set `requested_state` with `UpdateJob` requests to switch between the states `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING`. You can also use `UpdateJob` requests to change a job's state from `JOB_STATE_RUNNING` to `JOB_STATE_CANCELLED`, `JOB_STATE_DONE`, or `JOB_STATE_DRAINED`. These states irrevocably terminate the job if it hasn't already reached a terminal state. This field has no effect on `CreateJob` requests. 
+ values: + - :JOB_STATE_UNKNOWN + - :JOB_STATE_STOPPED + - :JOB_STATE_RUNNING + - :JOB_STATE_DONE + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLED + - :JOB_STATE_UPDATED + - :JOB_STATE_DRAINING + - :JOB_STATE_DRAINED + - :JOB_STATE_PENDING + - :JOB_STATE_CANCELLING + - :JOB_STATE_QUEUED + - :JOB_STATE_RESOURCE_CLEANING_UP + - !ruby/object:Api::Type::NestedObject + name: 'executionInfo' + description: | + Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'stages' + description: | + A mapping from each stage to the information about that stage. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Contains information about how a particular google.dataflow.v1beta3.Step will be executed. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service. + - !ruby/object:Api::Type::String + name: 'replaceJobId' + description: | + If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job. + - !ruby/object:Api::Type::NestedObject + name: 'transformNameMapping' + description: | + The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'clientRequestId' + description: | + The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. 
The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it. + - !ruby/object:Api::Type::String + name: 'replacedByJobId' + description: | + If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job. + - !ruby/object:Api::Type::Array + name: 'tempFiles' + description: | + A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'location' + description: | + The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. + - !ruby/object:Api::Type::NestedObject + name: 'pipelineDescription' + description: | + A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'originalPipelineTransform' + description: | + Description of each transform in the pipeline and collections between them. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'kind' + description: | + Type of transform. + values: + - :UNKNOWN_KIND + - :PAR_DO_KIND + - :GROUP_BY_KEY_KIND + - :FLATTEN_KIND + - :READ_KIND + - :WRITE_KIND + - :CONSTANT_KIND + - :SINGLETON_KIND + - :SHUFFLE_KIND + - !ruby/object:Api::Type::String + name: 'id' + description: | + SDK generated id of this transform instance. + - !ruby/object:Api::Type::String + name: 'name' + description: | + User provided name for this transform instance. + - !ruby/object:Api::Type::Array + name: 'displayData' + description: | + Transform-specific display data. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system. + - !ruby/object:Api::Type::String + name: 'namespace' + description: | + The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering. + - !ruby/object:Api::Type::String + name: 'strValue' + description: | + Contains value if the data is of string type. + - !ruby/object:Api::Type::String + name: 'int64Value' + description: | + Contains value if the data is of int64 type. + - !ruby/object:Api::Type::Integer + name: 'floatValue' + description: | + Contains value if the data is of float type. + - !ruby/object:Api::Type::String + name: 'javaClassValue' + description: | + Contains value if the data is of java class type. 
+ - !ruby/object:Api::Type::String + name: 'timestampValue' + description: | + Contains value if the data is of timestamp type. + - !ruby/object:Api::Type::String + name: 'durationValue' + description: | + Contains value if the data is of duration type. + - !ruby/object:Api::Type::Boolean + name: 'boolValue' + description: | + Contains value if the data is of a boolean type. + - !ruby/object:Api::Type::String + name: 'shortStrValue' + description: | + A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip. + - !ruby/object:Api::Type::String + name: 'url' + description: | + An optional full URL. + - !ruby/object:Api::Type::String + name: 'label' + description: | + An optional label to display in a dax UI for the element. + - !ruby/object:Api::Type::Array + name: 'outputCollectionName' + description: | + User names for all collection outputs to this transform. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'inputCollectionName' + description: | + User names for all collection inputs to this transform. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'executionPipelineStage' + description: | + Description of each stage of execution of the pipeline. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this stage. + - !ruby/object:Api::Type::String + name: 'id' + description: | + Dataflow service generated id for this stage. + - !ruby/object:Api::Type::Enum + name: 'kind' + description: | + Type of transform this stage is executing. 
+ values: + - :UNKNOWN_KIND + - :PAR_DO_KIND + - :GROUP_BY_KEY_KIND + - :FLATTEN_KIND + - :READ_KIND + - :WRITE_KIND + - :CONSTANT_KIND + - :SINGLETON_KIND + - :SHUFFLE_KIND + - !ruby/object:Api::Type::Array + name: 'inputSource' + description: | + Input sources for this stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this source; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransformOrCollection' + description: | + User name for the original user transform or collection with which this source is most closely associated. + - !ruby/object:Api::Type::String + name: 'sizeBytes' + description: | + Size of the source, if measurable. + - !ruby/object:Api::Type::Array + name: 'outputSource' + description: | + Output sources for this stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this source; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransformOrCollection' + description: | + User name for the original user transform or collection with which this source is most closely associated. + - !ruby/object:Api::Type::String + name: 'sizeBytes' + description: | + Size of the source, if measurable. + - !ruby/object:Api::Type::Array + name: 'prerequisiteStage' + description: | + Other stages that must complete before this stage can run. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'componentTransform' + description: | + Transforms that comprise this execution stage. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this transform; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransform' + description: | + User name for the original user transform with which this transform is most closely associated. + - !ruby/object:Api::Type::Array + name: 'componentSource' + description: | + Collections produced and consumed by component transforms of this stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'userName' + description: | + Human-readable name for this transform; may be user or system generated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Dataflow service generated name for this source. + - !ruby/object:Api::Type::String + name: 'originalTransformOrCollection' + description: | + User name for the original user transform or collection with which this source is most closely associated. + - !ruby/object:Api::Type::Array + name: 'displayData' + description: | + Pipeline level display data. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system. + - !ruby/object:Api::Type::String + name: 'namespace' + description: | + The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering. + - !ruby/object:Api::Type::String + name: 'strValue' + description: | + Contains value if the data is of string type. 
+ - !ruby/object:Api::Type::String + name: 'int64Value' + description: | + Contains value if the data is of int64 type. + - !ruby/object:Api::Type::Integer + name: 'floatValue' + description: | + Contains value if the data is of float type. + - !ruby/object:Api::Type::String + name: 'javaClassValue' + description: | + Contains value if the data is of java class type. + - !ruby/object:Api::Type::String + name: 'timestampValue' + description: | + Contains value if the data is of timestamp type. + - !ruby/object:Api::Type::String + name: 'durationValue' + description: | + Contains value if the data is of duration type. + - !ruby/object:Api::Type::Boolean + name: 'boolValue' + description: | + Contains value if the data is of a boolean type. + - !ruby/object:Api::Type::String + name: 'shortStrValue' + description: | + A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip. + - !ruby/object:Api::Type::String + name: 'url' + description: | + An optional full URL. + - !ruby/object:Api::Type::String + name: 'label' + description: | + An optional label to display in a dax UI for the element. + - !ruby/object:Api::Type::String + name: 'stepNamesHash' + description: | + A hash value of the submitted pipeline portable graph step names if exists. + - !ruby/object:Api::Type::Array + name: 'stageStates' + description: | + This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'executionStageName' + description: | + The name of the execution stage. + - !ruby/object:Api::Type::Enum + name: 'executionStageState' + description: | + Executions stage states allow the same set of values as JobState. 
+ values: + - :JOB_STATE_UNKNOWN + - :JOB_STATE_STOPPED + - :JOB_STATE_RUNNING + - :JOB_STATE_DONE + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLED + - :JOB_STATE_UPDATED + - :JOB_STATE_DRAINING + - :JOB_STATE_DRAINED + - :JOB_STATE_PENDING + - :JOB_STATE_CANCELLING + - :JOB_STATE_QUEUED + - :JOB_STATE_RESOURCE_CLEANING_UP + - !ruby/object:Api::Type::String + name: 'currentStateTime' + description: | + The time at which the stage transitioned to this state. + - !ruby/object:Api::Type::NestedObject + name: 'jobMetadata' + description: | + Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'sdkVersion' + description: | + The version of the SDK used to run the job. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + The version of the SDK used to run the job. + - !ruby/object:Api::Type::String + name: 'versionDisplayName' + description: | + A readable string describing the version of the SDK. + - !ruby/object:Api::Type::Enum + name: 'sdkSupportStatus' + description: | + The support status for this SDK version. + values: + - :UNKNOWN + - :SUPPORTED + - :STALE + - :DEPRECATED + - :UNSUPPORTED + - !ruby/object:Api::Type::Array + name: 'bugs' + description: | + Output only. Known bugs found in this SDK version. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Output only. Describes the impact of this SDK bug. + values: + - :TYPE_UNSPECIFIED + - :GENERAL + - :PERFORMANCE + - :DATALOSS + - !ruby/object:Api::Type::Enum + name: 'severity' + description: | + Output only. How severe the SDK bug is. + values: + - :SEVERITY_UNSPECIFIED + - :NOTICE + - :WARNING + - :SEVERE + - !ruby/object:Api::Type::String + name: 'uri' + description: | + Output only. Link to more information on the bug. 
+ - !ruby/object:Api::Type::Array + name: 'spannerDetails' + description: | + Identification of a Spanner source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + ProjectId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + InstanceId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'databaseId' + description: | + DatabaseId accessed in the connection. + - !ruby/object:Api::Type::Array + name: 'bigqueryDetails' + description: | + Identification of a BigQuery source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'table' + description: | + Table accessed in the connection. + - !ruby/object:Api::Type::String + name: 'dataset' + description: | + Dataset accessed in the connection. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + Project accessed in the connection. + - !ruby/object:Api::Type::String + name: 'query' + description: | + Query used to access data in the connection. + - !ruby/object:Api::Type::Array + name: 'bigTableDetails' + description: | + Identification of a Cloud Bigtable source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + ProjectId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + InstanceId accessed in the connection. + - !ruby/object:Api::Type::String + name: 'tableId' + description: | + TableId accessed in the connection. + - !ruby/object:Api::Type::Array + name: 'pubsubDetails' + description: | + Identification of a Pub/Sub source used in the Dataflow job. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'topic' + description: | + Topic accessed in the connection. + - !ruby/object:Api::Type::String + name: 'subscription' + description: | + Subscription used in the connection. + - !ruby/object:Api::Type::Array + name: 'fileDetails' + description: | + Identification of a File source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'filePattern' + description: | + File Pattern used to access files by the connector. + - !ruby/object:Api::Type::Array + name: 'datastoreDetails' + description: | + Identification of a Datastore source used in the Dataflow job. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'namespace' + description: | + Namespace used in the connection. + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + ProjectId accessed in the connection. + - !ruby/object:Api::Type::NestedObject + name: 'userDisplayProperties' + description: | + List of display properties to help UI filter jobs. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + The timestamp when the job was started (transitioned to JOB_STATE_PENDING). Flexible resource scheduling jobs are started with some delay after job creation, so start_time is unset before start and is updated when the job is started by the Cloud Dataflow service. For other jobs, start_time always equals to create_time and is immutable and set by the Cloud Dataflow service. + - !ruby/object:Api::Type::String + name: 'createdFromSnapshotId' + description: | + If this is specified, the job's initial state is populated from the given snapshot. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Reserved for future use. 
This field is set only in responses from the server; it is ignored if it is set in any requests. + - !ruby/object:Api::Type::NestedObject + name: 'runtimeUpdatableParams' + description: | + Additional job parameters that can only be updated during runtime using the projects.jobs.update method. These fields have no effect when specified during job creation. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxNumWorkers' + description: | + The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. + - !ruby/object:Api::Type::Integer + name: 'minNumWorkers' + description: | + The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. + - !ruby/object:Api::Type::Integer + name: 'workerUtilizationHint' + description: | + Target worker utilization, compared against the aggregate utilization of the worker pool by autoscaler, to determine upscaling and downscaling when absent other constraints such as backlog. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzi' + description: | + Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. + diff --git a/mmv1/products/dataflow/inspec.yaml b/mmv1/products/dataflow/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/dataflow/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/datafusion/api.yaml b/mmv1/products/datafusion/api.yaml index 48233a500..9d7ae28f9 100644 --- a/mmv1/products/datafusion/api.yaml +++ b/mmv1/products/datafusion/api.yaml @@ -206,3 +206,1005 @@ objects: project the network should specified in the form of projects/{host-project-id}/global/networks/{network} required: true input: true + + + + - !ruby/object:Api::Resource + name: Instance + base_url: '{{+parent}}/instances' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/datafusion/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Data Fusion instance. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The name of this instance is in the form of projects/{project}/locations/{location}/instances/{instance}. + - !ruby/object:Api::Type::String + name: 'description' + description: | + A description of this instance. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Required. Instance type. + values: + - :TYPE_UNSPECIFIED + - :BASIC + - :ENTERPRISE + - :DEVELOPER + - !ruby/object:Api::Type::Boolean + name: 'enableStackdriverLogging' + description: | + Option to enable Stackdriver Logging. 
+ - !ruby/object:Api::Type::Boolean + name: 'enableStackdriverMonitoring' + description: | + Option to enable Stackdriver Monitoring. + - !ruby/object:Api::Type::Boolean + name: 'privateInstance' + description: | + Specifies whether the Data Fusion instance should be private. If set to true, all Data Fusion nodes will have private IP addresses and will not be able to access the public internet. + - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Network configuration for a Data Fusion instance. These configurations are used for peering with the customer network. Configurations are optional when a public Data Fusion instance is to be created. However, providing these configurations allows several benefits, such as reduced network latency while accessing the customer resources from managed Data Fusion instance nodes, as well as access to the customer on-prem resources. + properties: + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. Name of the network in the customer project with which the Tenant Project will be peered for executing pipelines. In case of shared VPC where the network resides in another host project the network should be specified in the form of projects/{host-project-id}/global/networks/{network}. This is only required for connectivity type VPC_PEERING. + - !ruby/object:Api::Type::String + name: 'ipAllocation' + description: | + Optional. The IP range in CIDR notation to use for the managed Data Fusion instance nodes. This range must not overlap with any other ranges used in the Data Fusion instance network. This is required only when using connection type VPC_PEERING. Format: a.b.c.d/22 Example: 192.168.0.0/22 + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and the corresponding tenant project from a predefined list of available connection modes. 
If this field is unspecified for a private instance, VPC peering is used. + values: + - :CONNECTION_TYPE_UNSPECIFIED + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT_INTERFACES + - !ruby/object:Api::Type::NestedObject + name: 'privateServiceConnectConfig' + description: | + Configuration for using Private Service Connect to establish connectivity between the Data Fusion consumer project and the corresponding tenant project. + properties: + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + Required. The reference to the network attachment used to establish private connectivity. It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. + - !ruby/object:Api::Type::String + name: 'unreachableCidrBlock' + description: | + Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. The size of this block should be at least /25. This range should not overlap with the primary address range of any subnetwork used by the network attachment. This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632. Example: 192.168.0.0/25 + - !ruby/object:Api::Type::String + name: 'effectiveUnreachableCidrBlock' + description: | + Output only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. The size of this block is /25. The format of this field is governed by RFC 4632. Example: 240.0.0.0/25 + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. The character '=' is not allowed to be used within the labels. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'options' + description: | + Map of additional options used to configure the behavior of Data Fusion instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time the instance was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time the instance was last updated. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of this Data Fusion instance. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :FAILED + - :DELETING + - :UPGRADING + - :RESTARTING + - :UPDATING + - :AUTO_UPDATING + - :AUTO_UPGRADING + - :DISABLED + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of this Data Fusion instance if available. + - !ruby/object:Api::Type::String + name: 'serviceEndpoint' + description: | + Output only. Endpoint on which the Data Fusion UI is accessible. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Name of the zone in which the Data Fusion instance will be created. Only DEVELOPER instances use this field. + - !ruby/object:Api::Type::String + name: 'version' + description: | + Current version of the Data Fusion. Only specifiable in Update. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Output only. Deprecated. Use tenant_project_id instead to extract the tenant project ID. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Display name for an instance. + - !ruby/object:Api::Type::Array + name: 'availableVersion' + description: | + Output only. 
Available versions that the instance can be upgraded to using UpdateInstanceRequest. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'versionNumber' + description: | + The version number of the Data Fusion instance, such as '6.0.1.0'. + - !ruby/object:Api::Type::Boolean + name: 'defaultVersion' + description: | + Whether this is currently the default version for Cloud Data Fusion + - !ruby/object:Api::Type::Array + name: 'availableFeatures' + description: | + Represents a list of available feature names for a given version. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type represents the release availability of the version + values: + - :TYPE_UNSPECIFIED + - :TYPE_PREVIEW + - :TYPE_GENERAL_AVAILABILITY + - !ruby/object:Api::Type::String + name: 'apiEndpoint' + description: | + Output only. Endpoint on which the REST APIs is accessible. + - !ruby/object:Api::Type::String + name: 'gcsBucket' + description: | + Output only. Cloud Storage bucket generated by Data Fusion in the customer project. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Output only. List of accelerators enabled for this CDF instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Optional. The type of an accelator for a Cloud Data Fusion instance. + values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :CDC + - :HEALTHCARE + - :CCAI_INSIGHTS + - :CLOUDSEARCH + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the accelerator. + values: + - :STATE_UNSPECIFIED + - :ENABLED + - :DISABLED + - :UNKNOWN + - !ruby/object:Api::Type::String + name: 'p4ServiceAccount' + description: | + Output only. Service agent for the customer project. + - !ruby/object:Api::Type::String + name: 'tenantProjectId' + description: | + Output only. 
The name of the tenant project. + - !ruby/object:Api::Type::String + name: 'dataprocServiceAccount' + description: | + User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. + - !ruby/object:Api::Type::Boolean + name: 'enableRbac' + description: | + Option to enable granular role-based access control. + - !ruby/object:Api::Type::NestedObject + name: 'cryptoKeyConfig' + description: | + The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. + properties: + - !ruby/object:Api::Type::String + name: 'keyReference' + description: | + The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + - !ruby/object:Api::Type::Array + name: 'disabledReason' + description: | + Output only. If the instance state is DISABLED, the reason for disabling the instance. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'eventPublishConfig' + description: | + Confirguration of PubSubEventWriter. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Required. Option to enable Event Publishing. + - !ruby/object:Api::Type::String + name: 'topic' + description: | + Required. The resource name of the Pub/Sub topic. Format: projects/{project_id}/topics/{topic_id} + - !ruby/object:Api::Type::Boolean + name: 'enableZoneSeparation' + description: | + Option to enable granular zone separation. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. + - !ruby/object:Api::Type::String + name: 'workforceIdentityServiceEndpoint' + description: | + Output only. 
Endpoint on which the Data Fusion UI is accessible to third-party users + - !ruby/object:Api::Type::String + name: 'patchRevision' + description: | + Optional. Current patch revision of the Data Fusion. + - !ruby/object:Api::Type::Boolean + name: 'dataplexDataLineageIntegrationEnabled' + description: | + Optional. Option to enable the Dataplex Lineage Integration feature. + - !ruby/object:Api::Type::NestedObject + name: 'maintenancePolicy' + description: | + Maintenance policy of the instance. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + Maintenance window of the instance. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'recurringTimeWindow' + description: | + Represents an arbitrary window of time that recurs. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'window' + description: | + Represents an arbitrary window of time. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. The start time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. Example: "2024-01-01T12:04:06-04:00" + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. The end time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. The end time should take place after the start time. Example: "2024-01-02T12:04:06-06:00" + - !ruby/object:Api::Type::String + name: 'recurrence' + description: | + Required. An RRULE with format [RFC-5545](https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this window reccurs. They go on for the span of time between the start and end time. The only supported FREQ value is "WEEKLY". To have something repeat every weekday, use: "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR". This specifies how frequently the window starts. 
To have a 9 am - 5 pm UTC-4 window every weekday, use something like: ``` start time = 2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400 recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR ``` + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceExclusionWindow' + description: | + Represents an arbitrary window of time. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. The start time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. Example: "2024-01-01T12:04:06-04:00" + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. The end time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. The end time should take place after the start time. Example: "2024-01-02T12:04:06-06:00" + + + + + - !ruby/object:Api::Resource + name: Instance + base_url: '{{+parent}}/instances' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/datafusion/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Data Fusion instance. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The name of this instance is in the form of projects/{project}/locations/{location}/instances/{instance}. + - !ruby/object:Api::Type::String + name: 'description' + description: | + A description of this instance. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Required. Instance type. 
+ values: + - :TYPE_UNSPECIFIED + - :BASIC + - :ENTERPRISE + - :DEVELOPER + - !ruby/object:Api::Type::Boolean + name: 'enableStackdriverLogging' + description: | + Option to enable Stackdriver Logging. + - !ruby/object:Api::Type::Boolean + name: 'enableStackdriverMonitoring' + description: | + Option to enable Stackdriver Monitoring. + - !ruby/object:Api::Type::Boolean + name: 'privateInstance' + description: | + Specifies whether the Data Fusion instance should be private. If set to true, all Data Fusion nodes will have private IP addresses and will not be able to access the public internet. + - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Network configuration for a Data Fusion instance. These configurations are used for peering with the customer network. Configurations are optional when a public Data Fusion instance is to be created. However, providing these configurations allows several benefits, such as reduced network latency while accessing the customer resources from managed Data Fusion instance nodes, as well as access to the customer on-prem resources. + properties: + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. Name of the network in the customer project with which the Tenant Project will be peered for executing pipelines. In case of shared VPC where the network resides in another host project the network should specified in the form of projects/{host-project-id}/global/networks/{network}. This is only required for connectivity type VPC_PEERING. + - !ruby/object:Api::Type::String + name: 'ipAllocation' + description: | + Optional. The IP range in CIDR notation to use for the managed Data Fusion instance nodes. This range must not overlap with any other ranges used in the Data Fusion instance network. This is required only when using connection type VPC_PEERING. 
Format: a.b.c.d/22 Example: 192.168.0.0/22 + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and the corresponding tenant project from a predefined list of available connection modes. If this field is unspecified for a private instance, VPC peering is used. + values: + - :CONNECTION_TYPE_UNSPECIFIED + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT_INTERFACES + - !ruby/object:Api::Type::NestedObject + name: 'privateServiceConnectConfig' + description: | + Configuration for using Private Service Connect to establish connectivity between the Data Fusion consumer project and the corresponding tenant project. + properties: + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + Required. The reference to the network attachment used to establish private connectivity. It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. + - !ruby/object:Api::Type::String + name: 'unreachableCidrBlock' + description: | + Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. The size of this block should be at least /25. This range should not overlap with the primary address range of any subnetwork used by the network attachment. This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632. Example: 192.168.0.0/25 + - !ruby/object:Api::Type::String + name: 'effectiveUnreachableCidrBlock' + description: | + Output only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. The size of this block is /25. The format of this field is governed by RFC 4632. 
Example: 240.0.0.0/25 + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. The character '=' is not allowed to be used within the labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'options' + description: | + Map of additional options used to configure the behavior of Data Fusion instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time the instance was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time the instance was last updated. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of this Data Fusion instance. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :FAILED + - :DELETING + - :UPGRADING + - :RESTARTING + - :UPDATING + - :AUTO_UPDATING + - :AUTO_UPGRADING + - :DISABLED + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of this Data Fusion instance if available. + - !ruby/object:Api::Type::String + name: 'serviceEndpoint' + description: | + Output only. Endpoint on which the Data Fusion UI is accessible. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Name of the zone in which the Data Fusion instance will be created. Only DEVELOPER instances use this field. + - !ruby/object:Api::Type::String + name: 'version' + description: | + Current version of the Data Fusion. Only specifiable in Update. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Output only. Deprecated. 
Use tenant_project_id instead to extract the tenant project ID. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Display name for an instance. + - !ruby/object:Api::Type::Array + name: 'availableVersion' + description: | + Output only. Available versions that the instance can be upgraded to using UpdateInstanceRequest. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'versionNumber' + description: | + The version number of the Data Fusion instance, such as '6.0.1.0'. + - !ruby/object:Api::Type::Boolean + name: 'defaultVersion' + description: | + Whether this is currently the default version for Cloud Data Fusion + - !ruby/object:Api::Type::Array + name: 'availableFeatures' + description: | + Represents a list of available feature names for a given version. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type represents the release availability of the version + values: + - :TYPE_UNSPECIFIED + - :TYPE_PREVIEW + - :TYPE_GENERAL_AVAILABILITY + - !ruby/object:Api::Type::String + name: 'apiEndpoint' + description: | + Output only. Endpoint on which the REST APIs is accessible. + - !ruby/object:Api::Type::String + name: 'gcsBucket' + description: | + Output only. Cloud Storage bucket generated by Data Fusion in the customer project. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Output only. List of accelerators enabled for this CDF instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Optional. The type of an accelator for a Cloud Data Fusion instance. + values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :CDC + - :HEALTHCARE + - :CCAI_INSIGHTS + - :CLOUDSEARCH + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the accelerator. 
+ values: + - :STATE_UNSPECIFIED + - :ENABLED + - :DISABLED + - :UNKNOWN + - !ruby/object:Api::Type::String + name: 'p4ServiceAccount' + description: | + Output only. Service agent for the customer project. + - !ruby/object:Api::Type::String + name: 'tenantProjectId' + description: | + Output only. The name of the tenant project. + - !ruby/object:Api::Type::String + name: 'dataprocServiceAccount' + description: | + User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. + - !ruby/object:Api::Type::Boolean + name: 'enableRbac' + description: | + Option to enable granular role-based access control. + - !ruby/object:Api::Type::NestedObject + name: 'cryptoKeyConfig' + description: | + The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. + properties: + - !ruby/object:Api::Type::String + name: 'keyReference' + description: | + The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + - !ruby/object:Api::Type::Array + name: 'disabledReason' + description: | + Output only. If the instance state is DISABLED, the reason for disabling the instance. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'eventPublishConfig' + description: | + Confirguration of PubSubEventWriter. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Required. Option to enable Event Publishing. + - !ruby/object:Api::Type::String + name: 'topic' + description: | + Required. The resource name of the Pub/Sub topic. Format: projects/{project_id}/topics/{topic_id} + - !ruby/object:Api::Type::Boolean + name: 'enableZoneSeparation' + description: | + Option to enable granular zone separation. 
+ - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. + - !ruby/object:Api::Type::String + name: 'workforceIdentityServiceEndpoint' + description: | + Output only. Endpoint on which the Data Fusion UI is accessible to third-party users + - !ruby/object:Api::Type::String + name: 'patchRevision' + description: | + Optional. Current patch revision of the Data Fusion. + - !ruby/object:Api::Type::Boolean + name: 'dataplexDataLineageIntegrationEnabled' + description: | + Optional. Option to enable the Dataplex Lineage Integration feature. + - !ruby/object:Api::Type::NestedObject + name: 'maintenancePolicy' + description: | + Maintenance policy of the instance. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + Maintenance window of the instance. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'recurringTimeWindow' + description: | + Represents an arbitrary window of time that recurs. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'window' + description: | + Represents an arbitrary window of time. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. The start time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. Example: "2024-01-01T12:04:06-04:00" + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. The end time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. The end time should take place after the start time. Example: "2024-01-02T12:04:06-06:00" + - !ruby/object:Api::Type::String + name: 'recurrence' + description: | + Required. An RRULE with format [RFC-5545](https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this window reccurs. They go on for the span of time between the start and end time. The only supported FREQ value is "WEEKLY". 
To have something repeat every weekday, use: "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR". This specifies how frequently the window starts. To have a 9 am - 5 pm UTC-4 window every weekday, use something like: ``` start time = 2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400 recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR ``` + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceExclusionWindow' + description: | + Represents an arbitrary window of time. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. The start time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. Example: "2024-01-01T12:04:06-04:00" + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. The end time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. The end time should take place after the start time. Example: "2024-01-02T12:04:06-06:00" + + + + + - !ruby/object:Api::Resource + name: Instance + base_url: '{{+parent}}/instances' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/datafusion/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Data Fusion instance. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The name of this instance is in the form of projects/{project}/locations/{location}/instances/{instance}. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + A description of this instance. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Required. Instance type. + values: + - :TYPE_UNSPECIFIED + - :BASIC + - :ENTERPRISE + - :DEVELOPER + - !ruby/object:Api::Type::Boolean + name: 'enableStackdriverLogging' + description: | + Option to enable Stackdriver Logging. + - !ruby/object:Api::Type::Boolean + name: 'enableStackdriverMonitoring' + description: | + Option to enable Stackdriver Monitoring. + - !ruby/object:Api::Type::Boolean + name: 'privateInstance' + description: | + Specifies whether the Data Fusion instance should be private. If set to true, all Data Fusion nodes will have private IP addresses and will not be able to access the public internet. + - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Network configuration for a Data Fusion instance. These configurations are used for peering with the customer network. Configurations are optional when a public Data Fusion instance is to be created. However, providing these configurations allows several benefits, such as reduced network latency while accessing the customer resources from managed Data Fusion instance nodes, as well as access to the customer on-prem resources. + properties: + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. Name of the network in the customer project with which the Tenant Project will be peered for executing pipelines. In case of shared VPC where the network resides in another host project the network should specified in the form of projects/{host-project-id}/global/networks/{network}. This is only required for connectivity type VPC_PEERING. + - !ruby/object:Api::Type::String + name: 'ipAllocation' + description: | + Optional. The IP range in CIDR notation to use for the managed Data Fusion instance nodes. 
This range must not overlap with any other ranges used in the Data Fusion instance network. This is required only when using connection type VPC_PEERING. Format: a.b.c.d/22 Example: 192.168.0.0/22 + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and the corresponding tenant project from a predefined list of available connection modes. If this field is unspecified for a private instance, VPC peering is used. + values: + - :CONNECTION_TYPE_UNSPECIFIED + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT_INTERFACES + - !ruby/object:Api::Type::NestedObject + name: 'privateServiceConnectConfig' + description: | + Configuration for using Private Service Connect to establish connectivity between the Data Fusion consumer project and the corresponding tenant project. + properties: + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + Required. The reference to the network attachment used to establish private connectivity. It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. + - !ruby/object:Api::Type::String + name: 'unreachableCidrBlock' + description: | + Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. The size of this block should be at least /25. This range should not overlap with the primary address range of any subnetwork used by the network attachment. This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632. Example: 192.168.0.0/25 + - !ruby/object:Api::Type::String + name: 'effectiveUnreachableCidrBlock' + description: | + Output only. 
The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. The size of this block is /25. The format of this field is governed by RFC 4632. Example: 240.0.0.0/25 + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. The character '=' is not allowed to be used within the labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'options' + description: | + Map of additional options used to configure the behavior of Data Fusion instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time the instance was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time the instance was last updated. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of this Data Fusion instance. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :FAILED + - :DELETING + - :UPGRADING + - :RESTARTING + - :UPDATING + - :AUTO_UPDATING + - :AUTO_UPGRADING + - :DISABLED + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of this Data Fusion instance if available. + - !ruby/object:Api::Type::String + name: 'serviceEndpoint' + description: | + Output only. Endpoint on which the Data Fusion UI is accessible. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Name of the zone in which the Data Fusion instance will be created. Only DEVELOPER instances use this field. + - !ruby/object:Api::Type::String + name: 'version' + description: | + Current version of the Data Fusion. 
Only specifiable in Update. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Output only. Deprecated. Use tenant_project_id instead to extract the tenant project ID. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Display name for an instance. + - !ruby/object:Api::Type::Array + name: 'availableVersion' + description: | + Output only. Available versions that the instance can be upgraded to using UpdateInstanceRequest. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'versionNumber' + description: | + The version number of the Data Fusion instance, such as '6.0.1.0'. + - !ruby/object:Api::Type::Boolean + name: 'defaultVersion' + description: | + Whether this is currently the default version for Cloud Data Fusion + - !ruby/object:Api::Type::Array + name: 'availableFeatures' + description: | + Represents a list of available feature names for a given version. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Type represents the release availability of the version + values: + - :TYPE_UNSPECIFIED + - :TYPE_PREVIEW + - :TYPE_GENERAL_AVAILABILITY + - !ruby/object:Api::Type::String + name: 'apiEndpoint' + description: | + Output only. Endpoint on which the REST APIs is accessible. + - !ruby/object:Api::Type::String + name: 'gcsBucket' + description: | + Output only. Cloud Storage bucket generated by Data Fusion in the customer project. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Output only. List of accelerators enabled for this CDF instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Optional. The type of an accelator for a Cloud Data Fusion instance. 
+ values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :CDC + - :HEALTHCARE + - :CCAI_INSIGHTS + - :CLOUDSEARCH + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the accelerator. + values: + - :STATE_UNSPECIFIED + - :ENABLED + - :DISABLED + - :UNKNOWN + - !ruby/object:Api::Type::String + name: 'p4ServiceAccount' + description: | + Output only. Service agent for the customer project. + - !ruby/object:Api::Type::String + name: 'tenantProjectId' + description: | + Output only. The name of the tenant project. + - !ruby/object:Api::Type::String + name: 'dataprocServiceAccount' + description: | + User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. + - !ruby/object:Api::Type::Boolean + name: 'enableRbac' + description: | + Option to enable granular role-based access control. + - !ruby/object:Api::Type::NestedObject + name: 'cryptoKeyConfig' + description: | + The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. + properties: + - !ruby/object:Api::Type::String + name: 'keyReference' + description: | + The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + - !ruby/object:Api::Type::Array + name: 'disabledReason' + description: | + Output only. If the instance state is DISABLED, the reason for disabling the instance. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'eventPublishConfig' + description: | + Confirguration of PubSubEventWriter. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Required. Option to enable Event Publishing. + - !ruby/object:Api::Type::String + name: 'topic' + description: | + Required. 
The resource name of the Pub/Sub topic. Format: projects/{project_id}/topics/{topic_id} + - !ruby/object:Api::Type::Boolean + name: 'enableZoneSeparation' + description: | + Option to enable granular zone separation. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. + - !ruby/object:Api::Type::String + name: 'workforceIdentityServiceEndpoint' + description: | + Output only. Endpoint on which the Data Fusion UI is accessible to third-party users + - !ruby/object:Api::Type::String + name: 'patchRevision' + description: | + Optional. Current patch revision of the Data Fusion. + - !ruby/object:Api::Type::Boolean + name: 'dataplexDataLineageIntegrationEnabled' + description: | + Optional. Option to enable the Dataplex Lineage Integration feature. + - !ruby/object:Api::Type::NestedObject + name: 'maintenancePolicy' + description: | + Maintenance policy of the instance. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + Maintenance window of the instance. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'recurringTimeWindow' + description: | + Represents an arbitrary window of time that recurs. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'window' + description: | + Represents an arbitrary window of time. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. The start time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. Example: "2024-01-01T12:04:06-04:00" + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. The end time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. The end time should take place after the start time. Example: "2024-01-02T12:04:06-06:00" + - !ruby/object:Api::Type::String + name: 'recurrence' + description: | + Required. 
An RRULE with format [RFC-5545](https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this window recurs. They go on for the span of time between the start and end time. The only supported FREQ value is "WEEKLY". To have something repeat every weekday, use: "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR". This specifies how frequently the window starts. To have a 9 am - 5 pm UTC-4 window every weekday, use something like: ``` start time = 2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400 recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR ``` + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceExclusionWindow' + description: | + Represents an arbitrary window of time. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Required. The start time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. Example: "2024-01-01T12:04:06-04:00" + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Required. The end time of the time window provided in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. The end time should take place after the start time. Example: "2024-01-02T12:04:06-06:00" + diff --git a/mmv1/products/datafusion/inspec.yaml b/mmv1/products/datafusion/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/datafusion/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/dataproc/api.yaml b/mmv1/products/dataproc/api.yaml index ed575636e..10007e539 100644 --- a/mmv1/products/dataproc/api.yaml +++ b/mmv1/products/dataproc/api.yaml @@ -615,3 +615,9346 @@ objects: name: 'realm' description: | The name of the on-cluster Kerberos realm. + + + + - !ruby/object:Api::Resource + name: ProjectRegionCluster + base_url: 'projects/{{projectId}}/regions/{{region}}/clusters' + self_link: 'projects/{{projectId}}/regions/{{region}}/clusters/{{clusterName}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Describes the identifying information, config, and status of a Dataproc cluster + properties: + + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + Required. The Google Cloud Platform project ID that the cluster belongs to. + - !ruby/object:Api::Type::String + name: 'clusterName' + description: | + Required. The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused. + - !ruby/object:Api::Type::String + name: 'config' + description: | + Optional. The cluster config for a cluster of Compute Engine Instances. 
Note that Dataproc may set default values, and values may change when clusters are updated.Exactly one of ClusterConfig or VirtualClusterConfig must be specified. + - !ruby/object:Api::Type::String + name: 'virtualClusterConfig' + description: | + Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'status' + description: | + Output only. Cluster status. + - !ruby/object:Api::Type::Array + name: 'statusHistory' + description: | + Output only. The previous cluster status. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'clusterUuid' + description: | + Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster. + - !ruby/object:Api::Type::String + name: 'metrics' + description: | + Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release. 
+ + + + + - !ruby/object:Api::Resource + name: ProjectRegionCluster + base_url: 'projects/{{projectId}}/regions/{{region}}/clusters' + self_link: 'projects/{{projectId}}/regions/{{region}}/clusters/{{clusterName}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Describes the identifying information, config, and status of a Dataproc cluster + properties: + + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + Required. The Google Cloud Platform project ID that the cluster belongs to. + - !ruby/object:Api::Type::String + name: 'clusterName' + description: | + Required. The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused. + - !ruby/object:Api::Type::String + name: 'config' + description: | + Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated.Exactly one of ClusterConfig or VirtualClusterConfig must be specified. + - !ruby/object:Api::Type::String + name: 'virtualClusterConfig' + description: | + Optional. 
The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'status' + description: | + Output only. Cluster status. + - !ruby/object:Api::Type::Array + name: 'statusHistory' + description: | + Output only. The previous cluster status. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'clusterUuid' + description: | + Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster. + - !ruby/object:Api::Type::String + name: 'metrics' + description: | + Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release. 
+ + + + + - !ruby/object:Api::Resource + name: ProjectLocationWorkflowTemplate + base_url: '{{+parent}}/workflowTemplates' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A Dataproc workflow template resource. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + - !ruby/object:Api::Type::Integer + name: 'version' + description: | + Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. 
The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time template was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time template was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'placement' + description: | + Specifies workflow execution target.Either managed_cluster or cluster_selector is required. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'managedCluster' + description: | + Cluster that is managed by the workflow. + properties: + - !ruby/object:Api::Type::String + name: 'clusterName' + description: | + Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + The cluster config. + properties: + - !ruby/object:Api::Type::String + name: 'configBucket' + description: | + Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - !ruby/object:Api::Type::String + name: 'tempBucket' + description: | + Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - !ruby/object:Api::Type::NestedObject + name: 'gceClusterConfig' + description: | + Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'zoneUri' + description: | + Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone] + - !ruby/object:Api::Type::String + name: 'networkUri' + description: | + Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default + - !ruby/object:Api::Type::String + name: 'subnetworkUri' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0 + - !ruby/object:Api::Type::Boolean + name: 'internalIpOnly' + description: | + Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Google Private Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM. + - !ruby/object:Api::Type::Enum + name: 'privateIpv6GoogleAccess' + description: | + Optional. The type of IPv6 access for a cluster. 
+ values: + - :PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED + - :INHERIT_FROM_SUBNETWORK + - :OUTBOUND + - :BIDIRECTIONAL + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. + - !ruby/object:Api::Type::Array + name: 'serviceAccountScopes' + description: | + Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'reservationAffinity' + description: | + Reservation Affinity for consuming Zonal reservation. + properties: + - !ruby/object:Api::Type::Enum + name: 'consumeReservationType' + description: | + Optional. Type of reservation to consume + values: + - :TYPE_UNSPECIFIED + - :NO_RESERVATION + - :ANY_RESERVATION + - :SPECIFIC_RESERVATION + - !ruby/object:Api::Type::String + name: 'key' + description: | + Optional. Corresponds to the label key of reservation resource. + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Optional. Corresponds to the label values of reservation resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupAffinity' + description: | + Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource. + properties: + - !ruby/object:Api::Type::String + name: 'nodeGroupUri' + description: | + Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1 + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceConfig' + description: | + Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm). + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableSecureBoot' + description: | + Optional. Defines whether instances have Secure Boot enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableVtpm' + description: | + Optional. 
Defines whether instances have the vTPM enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableIntegrityMonitoring' + description: | + Optional. Defines whether instances have integrity monitoring enabled. + - !ruby/object:Api::Type::NestedObject + name: 'confidentialInstanceConfig' + description: | + Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs) + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableConfidentialCompute' + description: | + Optional. Defines whether the instance should have confidential compute enabled. + - !ruby/object:Api::Type::NestedObject + name: 'masterConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. 
+ - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). 
See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). + - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. 
+ - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. 
If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster in placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. 
+ - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'workerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. 
+ - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). 
See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). + - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. 
+ - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. 
If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster in placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. 
+ - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'secondaryWorkerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. 
+ - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). 
See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). + - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. 
+ - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. 
If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster in placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. 
+ - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and config of software inside the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. 
The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'optionalComponents' + description: | + Optional. The set of components to activate on the cluster. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'initializationActions' + description: | + Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'executableFile' + description: | + Required. Cloud Storage URI of executable file. + - !ruby/object:Api::Type::String + name: 'executionTimeout' + description: | + Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. 
+ - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'gcePdKmsKeyName' + description: | + Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information. + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the following job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Autoscaling 
Policy config associated with the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'policyUri' + description: | + Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region. + - !ruby/object:Api::Type::NestedObject + name: 'securityConfig' + description: | + Security related configuration, including encryption, Kerberos, etc. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Specifies Kerberos related configuration. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableKerberos' + description: | + Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. + - !ruby/object:Api::Type::String + name: 'rootPrincipalPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. + - !ruby/object:Api::Type::String + name: 'kmsKeyUri' + description: | + Optional. The URI of the KMS key used to encrypt sensitive files. + - !ruby/object:Api::Type::String + name: 'keystoreUri' + description: | + Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'truststoreUri' + description: | + Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'keystorePasswordUri' + description: | + Optional. 
The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'keyPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'truststorePasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustRealm' + description: | + Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustKdc' + description: | + Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustAdminServer' + description: | + Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustSharedPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'kdcDbKeyUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. + - !ruby/object:Api::Type::Integer + name: 'tgtLifetimeHours' + description: | + Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. 
+ - !ruby/object:Api::Type::String + name: 'realm' + description: | + Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. + - !ruby/object:Api::Type::NestedObject + name: 'identityConfig' + description: | + Identity related configuration, including service account based secure multi-tenancy user mappings. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'userServiceAccountMapping' + description: | + Required. Map of user to service account. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'lifecycleConfig' + description: | + Specifies the cluster auto-delete schedule configuration. + properties: + - !ruby/object:Api::Type::String + name: 'idleDeleteTtl' + description: | + Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTime' + description: | + Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTtl' + description: | + Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'idleStartTime' + description: | + Output only. 
The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::NestedObject + name: 'endpointConfig' + description: | + Endpoint config for this cluster + properties: + - !ruby/object:Api::Type::NestedObject + name: 'httpPorts' + description: | + Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'enableHttpPortAccess' + description: | + Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. + - !ruby/object:Api::Type::NestedObject + name: 'metastoreConfig' + description: | + Specifies a Metastore configuration. + properties: + - !ruby/object:Api::Type::String + name: 'dataprocMetastoreService' + description: | + Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name] + - !ruby/object:Api::Type::NestedObject + name: 'gkeClusterConfig' + description: | + The cluster's GKE config. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'namespacedGkeDeploymentTarget' + description: | + Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster. + properties: + - !ruby/object:Api::Type::String + name: 'targetGkeCluster' + description: | + Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::String + name: 'clusterNamespace' + description: | + Optional. A namespace within the GKE cluster to deploy into. 
+ - !ruby/object:Api::Type::String + name: 'gkeClusterTarget' + description: | + Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::Array + name: 'nodePoolTarget' + description: | + Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'nodePool' + description: | + Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}' + - !ruby/object:Api::Type::Array + name: 'roles' + description: | + Required. The roles associated with the GKE node pool. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodePoolConfig' + description: | + The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). + properties: + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Parameters that describe cluster nodes. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types). + - !ruby/object:Api::Type::Integer + name: 'localSsdCount' + description: | + Optional. 
The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)). + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorCount' + description: | + The number of accelerator cards exposed to an instance. + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + The accelerator type resource name (see GPUs on Compute Engine). + - !ruby/object:Api::Type::String + name: 'gpuPartitionSize' + description: | + Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge". + - !ruby/object:Api::Type::String + name: 'bootDiskKmsKey' + description: | + Optional. 
The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} + - !ruby/object:Api::Type::Boolean + name: 'spot' + description: | + Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). + - !ruby/object:Api::Type::Array + name: 'locations' + description: | + Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'autoscaling' + description: | + GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage. + properties: + - !ruby/object:Api::Type::Integer + name: 'minNodeCount' + description: | + The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count. + - !ruby/object:Api::Type::Integer + name: 'maxNodeCount' + description: | + The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster. 
+ - !ruby/object:Api::Type::NestedObject + name: 'dataprocMetricConfig' + description: | + Dataproc metric config. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Required. Metrics sources to enable. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'metricSource' + description: | + Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information). + values: + - :METRIC_SOURCE_UNSPECIFIED + - :MONITORING_AGENT_DEFAULTS + - :HDFS + - :SPARK + - :YARN + - :SPARK_HISTORY_SERVER + - :HIVESERVER2 + - :HIVEMETASTORE + - :FLINK + - !ruby/object:Api::Type::Array + name: 'metricOverrides' + description: | + Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source (any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'auxiliaryNodeGroups' + description: | + Optional. The node group settings. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroup' + description: | + Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The Node group resource name (https://aip.dev/122). + - !ruby/object:Api::Type::Array + name: 'roles' + description: | + Required. Node group roles. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. 
+ - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). 
See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). + - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. 
+ - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. 
If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. 
+ - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. The config setting to enable cluster creation/update to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'nodeGroupId' + description: | + Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'clusterSelector' + description: | + A selector that chooses target cluster for jobs based on metadata. + properties: + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used. + - !ruby/object:Api::Type::NestedObject + name: 'clusterLabels' + description: | + Required. The cluster labels. Cluster must have all labels to match. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jobs' + description: | + Required. The Directed Acyclic Graph of Jobs to submit. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'stepId' + description: | + Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. + - !ruby/object:Api::Type::NestedObject + name: 'hadoopJob' + description: | + A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file containing the main class. 
Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkJob' + description: | + A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. 
A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pysparkJob' + description: | + A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainPythonFileUri' + description: | + Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'pythonFileUris' + description: | + Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'hiveJob' + description: | + A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains Hive queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. 
You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'pigJob' + description: | + A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains the Pig queries. 
+ - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkRJob' + description: | + A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainRFileUri' + description: | + Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkSqlJob' + description: | + A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Dataproc API might be overwritten. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'prestoJob' + description: | + A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Presto client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'trinoJob' + description: | + A Dataproc job for running Trino (https://trino.io/) queries. 
IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Trino client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. 
Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Trino CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'flinkJob' + description: | + A Dataproc job for running Apache Flink applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'savepointUri' + description: | + Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. 
+ - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Job scheduling options. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxFailuresPerHour' + description: | + Optional. 
Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Integer + name: 'maxFailuresTotal' + description: | + Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Array + name: 'prerequisiteStepIds' + description: | + Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'parameters' + description: | + Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. + - !ruby/object:Api::Type::Array + name: 'fields' + description: | + Required. 
Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels'key' placement.clusterSelector.clusterLabels'key' placement.managedCluster.labels'key' placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by step-id: jobs'step-id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri jobs'step-id'.pySparkJob.mainPythonFileUri jobs'step-id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0 jobs'step-id'.hadoopJob.fileUris0 jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 Other examples: jobs'step-id'.hadoopJob.properties'key' jobs'step-id'.hadoopJob.args0 jobs'step-id'.hiveJob.scriptVariables'key' jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Brief description of the parameter. Must not exceed 1024 characters. + - !ruby/object:Api::Type::NestedObject + name: 'validation' + description: | + Configuration for parameter validation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'regex' + description: | + Validation based on regular expressions. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'regexes' + description: | + Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'values' + description: | + Validation based on a list of allowed values. + properties: + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Required. List of allowed values for the parameter. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'dagTimeout' + description: | + Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for encrypting workflow template job arguments. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. 
The Cloud KMS key name to use for encrypting workflow template job arguments.When this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + + + + + - !ruby/object:Api::Resource + name: ProjectLocationWorkflowTemplate + base_url: '{{+parent}}/workflowTemplates' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + 
message: 'message' + description: |- + A Dataproc workflow template resource. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + - !ruby/object:Api::Type::Integer + name: 'version' + description: | + Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time template was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time template was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this template. 
These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'placement' + description: | + Specifies workflow execution target.Either managed_cluster or cluster_selector is required. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'managedCluster' + description: | + Cluster that is managed by the workflow. + properties: + - !ruby/object:Api::Type::String + name: 'clusterName' + description: | + Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + The cluster config. + properties: + - !ruby/object:Api::Type::String + name: 'configBucket' + description: | + Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. 
+ - !ruby/object:Api::Type::String + name: 'tempBucket' + description: | + Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - !ruby/object:Api::Type::NestedObject + name: 'gceClusterConfig' + description: | + Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'zoneUri' + description: | + Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone] + - !ruby/object:Api::Type::String + name: 'networkUri' + description: | + Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default + - !ruby/object:Api::Type::String + name: 'subnetworkUri' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0 + - !ruby/object:Api::Type::Boolean + name: 'internalIpOnly' + description: | + Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Google Private Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM. + - !ruby/object:Api::Type::Enum + name: 'privateIpv6GoogleAccess' + description: | + Optional. The type of IPv6 access for a cluster. + values: + - :PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED + - :INHERIT_FROM_SUBNETWORK + - :OUTBOUND + - :BIDIRECTIONAL + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. 
The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. + - !ruby/object:Api::Type::Array + name: 'serviceAccountScopes' + description: | + Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'reservationAffinity' + description: | + Reservation Affinity for consuming Zonal reservation. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'consumeReservationType' + description: | + Optional. Type of reservation to consume + values: + - :TYPE_UNSPECIFIED + - :NO_RESERVATION + - :ANY_RESERVATION + - :SPECIFIC_RESERVATION + - !ruby/object:Api::Type::String + name: 'key' + description: | + Optional. Corresponds to the label key of reservation resource. + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Optional. Corresponds to the label values of reservation resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupAffinity' + description: | + Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource. + properties: + - !ruby/object:Api::Type::String + name: 'nodeGroupUri' + description: | + Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1 + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceConfig' + description: | + Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm). + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableSecureBoot' + description: | + Optional. Defines whether instances have Secure Boot enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableVtpm' + description: | + Optional. Defines whether instances have the vTPM enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableIntegrityMonitoring' + description: | + Optional. Defines whether instances have integrity monitoring enabled. 
+ - !ruby/object:Api::Type::NestedObject + name: 'confidentialInstanceConfig' + description: | + Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs) + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableConfidentialCompute' + description: | + Optional. Defines whether the instance should have confidential compute enabled. + - !ruby/object:Api::Type::NestedObject + name: 'masterConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'workerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'secondaryWorkerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and config of software inside the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'optionalComponents' + description: | + Optional. The set of components to activate on the cluster. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'initializationActions' + description: | + Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'executableFile' + description: | + Required. Cloud Storage URI of executable file. + - !ruby/object:Api::Type::String + name: 'executionTimeout' + description: | + Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'gcePdKmsKeyName' + description: | + Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information. + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. 
See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the following job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Autoscaling Policy config associated with the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'policyUri' + description: | + Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region. + - !ruby/object:Api::Type::NestedObject + name: 'securityConfig' + description: | + Security related configuration, including encryption, Kerberos, etc. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Specifies Kerberos related configuration. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableKerberos' + description: | + Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. + - !ruby/object:Api::Type::String + name: 'rootPrincipalPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. + - !ruby/object:Api::Type::String + name: 'kmsKeyUri' + description: | + Optional. The URI of the KMS key used to encrypt sensitive files. + - !ruby/object:Api::Type::String + name: 'keystoreUri' + description: | + Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'truststoreUri' + description: | + Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'keystorePasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'keyPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'truststorePasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. 
+ - !ruby/object:Api::Type::String + name: 'crossRealmTrustRealm' + description: | + Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustKdc' + description: | + Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustAdminServer' + description: | + Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustSharedPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'kdcDbKeyUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. + - !ruby/object:Api::Type::Integer + name: 'tgtLifetimeHours' + description: | + Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. + - !ruby/object:Api::Type::String + name: 'realm' + description: | + Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. + - !ruby/object:Api::Type::NestedObject + name: 'identityConfig' + description: | + Identity related configuration, including service account based secure multi-tenancy user mappings. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'userServiceAccountMapping' + description: | + Required. Map of user to service account. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'lifecycleConfig' + description: | + Specifies the cluster auto-delete schedule configuration. + properties: + - !ruby/object:Api::Type::String + name: 'idleDeleteTtl' + description: | + Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTime' + description: | + Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTtl' + description: | + Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'idleStartTime' + description: | + Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::NestedObject + name: 'endpointConfig' + description: | + Endpoint config for this cluster + properties: + - !ruby/object:Api::Type::NestedObject + name: 'httpPorts' + description: | + Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'enableHttpPortAccess' + description: | + Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. + - !ruby/object:Api::Type::NestedObject + name: 'metastoreConfig' + description: | + Specifies a Metastore configuration. + properties: + - !ruby/object:Api::Type::String + name: 'dataprocMetastoreService' + description: | + Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name] + - !ruby/object:Api::Type::NestedObject + name: 'gkeClusterConfig' + description: | + The cluster's GKE config. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'namespacedGkeDeploymentTarget' + description: | + Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster. + properties: + - !ruby/object:Api::Type::String + name: 'targetGkeCluster' + description: | + Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::String + name: 'clusterNamespace' + description: | + Optional. A namespace within the GKE cluster to deploy into. + - !ruby/object:Api::Type::String + name: 'gkeClusterTarget' + description: | + Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::Array + name: 'nodePoolTarget' + description: | + Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. 
Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'nodePool' + description: | + Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}' + - !ruby/object:Api::Type::Array + name: 'roles' + description: | + Required. The roles associated with the GKE node pool. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodePoolConfig' + description: | + The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). + properties: + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Parameters that describe cluster nodes. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types). + - !ruby/object:Api::Type::Integer + name: 'localSsdCount' + description: | + Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)). + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. 
A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node. +            item_type: !ruby/object:Api::Type::NestedObject +              properties: +                - !ruby/object:Api::Type::String +                  name: 'acceleratorCount' +                  description: | +                    The number of accelerator cards exposed to an instance. +                - !ruby/object:Api::Type::String +                  name: 'acceleratorType' +                  description: | +                    The accelerator type resource name (see GPUs on Compute Engine). +                - !ruby/object:Api::Type::String +                  name: 'gpuPartitionSize' +                  description: | +                    Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). +        - !ruby/object:Api::Type::String +          name: 'minCpuPlatform' +          description: | +            Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge". +        - !ruby/object:Api::Type::String +          name: 'bootDiskKmsKey' +          description: | +            Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} +        - !ruby/object:Api::Type::Boolean +          name: 'spot' +          description: | +            Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
+ - !ruby/object:Api::Type::Array + name: 'locations' + description: | + Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'autoscaling' + description: | + GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage. + properties: + - !ruby/object:Api::Type::Integer + name: 'minNodeCount' + description: | + The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count. + - !ruby/object:Api::Type::Integer + name: 'maxNodeCount' + description: | + The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster. + - !ruby/object:Api::Type::NestedObject + name: 'dataprocMetricConfig' + description: | + Dataproc metric config. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Required. Metrics sources to enable. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'metricSource' + description: | + Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information). 
+              values: +                - :METRIC_SOURCE_UNSPECIFIED +                - :MONITORING_AGENT_DEFAULTS +                - :HDFS +                - :SPARK +                - :YARN +                - :SPARK_HISTORY_SERVER +                - :HIVESERVER2 +                - :HIVEMETASTORE +                - :FLINK +            - !ruby/object:Api::Type::Array +              name: 'metricOverrides' +              description: | +                Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected. +              item_type: Api::Type::String +    - !ruby/object:Api::Type::Array +      name: 'auxiliaryNodeGroups' +      description: | +        Optional. The node group settings. +      item_type: !ruby/object:Api::Type::NestedObject +        properties: +          - !ruby/object:Api::Type::NestedObject +            name: 'nodeGroup' +            description: | +              Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. +            properties: +              - !ruby/object:Api::Type::String +                name: 'name' +                description: | +                  The Node group resource name (https://aip.dev/122). +              - !ruby/object:Api::Type::Array +                name: 'roles' +                description: | +                  Required. Node group roles.
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. 
Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). + - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. 
+                  item_type: !ruby/object:Api::Type::NestedObject +                    properties: +                      - !ruby/object:Api::Type::String +                        name: 'acceleratorTypeUri' +                        description: | +                          Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. +                      - !ruby/object:Api::Type::Integer +                        name: 'acceleratorCount' +                        description: | +                          The number of the accelerator cards of this type exposed to this instance. +                - !ruby/object:Api::Type::String +                  name: 'minCpuPlatform' +                  description: | +                    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). +                - !ruby/object:Api::Type::Integer +                  name: 'minNumInstances' +                  description: | +                    Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted.
+                - !ruby/object:Api::Type::NestedObject +                  name: 'instanceFlexibilityPolicy' +                  description: | +                    Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. +                  properties: +                    - !ruby/object:Api::Type::Array +                      name: 'instanceSelectionList' +                      description: | +                        Optional. List of instance selection options that the group will use when creating new VMs. +                      item_type: !ruby/object:Api::Type::NestedObject +                        properties: +                          - !ruby/object:Api::Type::Array +                            name: 'machineTypes' +                            description: | +                              Optional. Full machine-type names, e.g. "n1-standard-16". +                            item_type: Api::Type::String +                          - !ruby/object:Api::Type::Integer +                            name: 'rank' +                            description: | +                              Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. +                    - !ruby/object:Api::Type::Array +                      name: 'instanceSelectionResults' +                      description: | +                        Output only. A list of instance selection results in the group. +                      item_type: !ruby/object:Api::Type::NestedObject +                        properties: +                          - !ruby/object:Api::Type::String +                            name: 'machineType' +                            description: | +                              Output only. Full machine-type names, e.g. "n1-standard-16". +                          - !ruby/object:Api::Type::Integer +                            name: 'vmCount' +                            description: | +                              Output only. Number of VMs provisioned with the machine_type. +                - !ruby/object:Api::Type::NestedObject +                  name: 'startupConfig' +                  description: | +                    Configuration to handle the startup of instances during cluster create and update process. +                  properties: +                    - !ruby/object:Api::Type::Integer +                      name: 'requiredRegistrationFraction' +                      description: | +                        Optional. The config setting to enable cluster creation/update to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now.
The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). +          - !ruby/object:Api::Type::NestedObject +            name: 'labels' +            description: | +              Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. +            properties: +              - !ruby/object:Api::Type::String +                name: 'additionalProperties' +                description: | + +          - !ruby/object:Api::Type::String +            name: 'nodeGroupId' +            description: | +              Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. +  - !ruby/object:Api::Type::NestedObject +    name: 'labels' +    description: | +      Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster. +    properties: +      - !ruby/object:Api::Type::String +        name: 'additionalProperties' +        description: | + +  - !ruby/object:Api::Type::NestedObject +    name: 'clusterSelector' +    description: | +      A selector that chooses target cluster for jobs based on metadata. +    properties: +      - !ruby/object:Api::Type::String +        name: 'zone' +        description: | +          Optional. The zone where workflow process executes.
This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used. + - !ruby/object:Api::Type::NestedObject + name: 'clusterLabels' + description: | + Required. The cluster labels. Cluster must have all labels to match. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jobs' + description: | + Required. The Directed Acyclic Graph of Jobs to submit. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'stepId' + description: | + Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. + - !ruby/object:Api::Type::NestedObject + name: 'hadoopJob' + description: | + A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris. 
+ - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkJob' + description: | + A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pysparkJob' + description: | + A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainPythonFileUri' + description: | + Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'pythonFileUris' + description: | + Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'hiveJob' + description: | + A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains Hive queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'pigJob' + description: | + A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains the Pig queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. 
You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkRJob' + description: | + A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainRFileUri' + description: | + Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkSqlJob' + description: | + A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'prestoJob' + description: | + A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. 
Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Presto client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'trinoJob' + description: | + A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Trino client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Trino CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'flinkJob' + description: | + A Dataproc job for running Apache Flink applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'savepointUri' + description: | + Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Job scheduling options. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxFailuresPerHour' + description: | + Optional. Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Integer + name: 'maxFailuresTotal' + description: | + Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. 
After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Array + name: 'prerequisiteStepIds' + description: | + Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'parameters' + description: | + Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. + - !ruby/object:Api::Type::Array + name: 'fields' + description: | + Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. 
For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels'key' placement.clusterSelector.clusterLabels'key' placement.managedCluster.labels'key' placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by step-id: jobs'step-id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri jobs'step-id'.pySparkJob.mainPythonFileUri jobs'step-id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0 jobs'step-id'.hadoopJob.fileUris0 jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 Other examples: jobs'step-id'.hadoopJob.properties'key' jobs'step-id'.hadoopJob.args0 jobs'step-id'.hiveJob.scriptVariables'key' jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Brief description of the parameter. Must not exceed 1024 characters. + - !ruby/object:Api::Type::NestedObject + name: 'validation' + description: | + Configuration for parameter validation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'regex' + description: | + Validation based on regular expressions. + properties: + - !ruby/object:Api::Type::Array + name: 'regexes' + description: | + Required. RE2 regular expressions used to validate the parameter's value. 
The value must match the regex in its entirety (substring matches are not sufficient). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'values' + description: | + Validation based on a list of allowed values. + properties: + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Required. List of allowed values for the parameter. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'dagTimeout' + description: | + Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for encrypting workflow template job arguments. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. 
The Cloud KMS key name to use for encrypting workflow template job arguments.When this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + + + + + - !ruby/object:Api::Resource + name: ProjectLocationWorkflowTemplate + base_url: '{{+parent}}/workflowTemplates' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + 
message: 'message' + description: |- + A Dataproc workflow template resource. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + - !ruby/object:Api::Type::Integer + name: 'version' + description: | + Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time template was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time template was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this template. 
These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'placement' + description: | + Specifies workflow execution target.Either managed_cluster or cluster_selector is required. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'managedCluster' + description: | + Cluster that is managed by the workflow. + properties: + - !ruby/object:Api::Type::String + name: 'clusterName' + description: | + Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + The cluster config. + properties: + - !ruby/object:Api::Type::String + name: 'configBucket' + description: | + Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. 
+ - !ruby/object:Api::Type::String + name: 'tempBucket' + description: | + Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - !ruby/object:Api::Type::NestedObject + name: 'gceClusterConfig' + description: | + Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'zoneUri' + description: | + Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone] + - !ruby/object:Api::Type::String + name: 'networkUri' + description: | + Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default + - !ruby/object:Api::Type::String + name: 'subnetworkUri' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0 + - !ruby/object:Api::Type::Boolean + name: 'internalIpOnly' + description: | + Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Google Private Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM. + - !ruby/object:Api::Type::Enum + name: 'privateIpv6GoogleAccess' + description: | + Optional. The type of IPv6 access for a cluster. + values: + - :PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED + - :INHERIT_FROM_SUBNETWORK + - :OUTBOUND + - :BIDIRECTIONAL + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. 
The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. + - !ruby/object:Api::Type::Array + name: 'serviceAccountScopes' + description: | + Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'reservationAffinity' + description: | + Reservation Affinity for consuming Zonal reservation. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'consumeReservationType' + description: | + Optional. Type of reservation to consume + values: + - :TYPE_UNSPECIFIED + - :NO_RESERVATION + - :ANY_RESERVATION + - :SPECIFIC_RESERVATION + - !ruby/object:Api::Type::String + name: 'key' + description: | + Optional. Corresponds to the label key of reservation resource. + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Optional. Corresponds to the label values of reservation resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupAffinity' + description: | + Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource. + properties: + - !ruby/object:Api::Type::String + name: 'nodeGroupUri' + description: | + Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1 + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceConfig' + description: | + Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm). + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableSecureBoot' + description: | + Optional. Defines whether instances have Secure Boot enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableVtpm' + description: | + Optional. Defines whether instances have the vTPM enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableIntegrityMonitoring' + description: | + Optional. Defines whether instances have integrity monitoring enabled. 
+ - !ruby/object:Api::Type::NestedObject + name: 'confidentialInstanceConfig' + description: | + Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs) + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableConfidentialCompute' + description: | + Optional. Defines whether the instance should have confidential compute enabled. + - !ruby/object:Api::Type::NestedObject + name: 'masterConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'workerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'secondaryWorkerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and config of software inside the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'optionalComponents' + description: | + Optional. The set of components to activate on the cluster. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'initializationActions' + description: | + Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'executableFile' + description: | + Required. Cloud Storage URI of executable file. + - !ruby/object:Api::Type::String + name: 'executionTimeout' + description: | + Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'gcePdKmsKeyName' + description: | + Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information. + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. 
See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the following job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Autoscaling Policy config associated with the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'policyUri' + description: | + Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region. + - !ruby/object:Api::Type::NestedObject + name: 'securityConfig' + description: | + Security related configuration, including encryption, Kerberos, etc. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Specifies Kerberos related configuration. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableKerberos' + description: | + Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. + - !ruby/object:Api::Type::String + name: 'rootPrincipalPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. + - !ruby/object:Api::Type::String + name: 'kmsKeyUri' + description: | + Optional. The URI of the KMS key used to encrypt sensitive files. + - !ruby/object:Api::Type::String + name: 'keystoreUri' + description: | + Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'truststoreUri' + description: | + Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'keystorePasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'keyPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'truststorePasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. 
+ - !ruby/object:Api::Type::String + name: 'crossRealmTrustRealm' + description: | + Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustKdc' + description: | + Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustAdminServer' + description: | + Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustSharedPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'kdcDbKeyUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. + - !ruby/object:Api::Type::Integer + name: 'tgtLifetimeHours' + description: | + Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. + - !ruby/object:Api::Type::String + name: 'realm' + description: | + Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. + - !ruby/object:Api::Type::NestedObject + name: 'identityConfig' + description: | + Identity related configuration, including service account based secure multi-tenancy user mappings. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'userServiceAccountMapping' + description: | + Required. Map of user to service account. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'lifecycleConfig' + description: | + Specifies the cluster auto-delete schedule configuration. + properties: + - !ruby/object:Api::Type::String + name: 'idleDeleteTtl' + description: | + Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTime' + description: | + Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTtl' + description: | + Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'idleStartTime' + description: | + Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::NestedObject + name: 'endpointConfig' + description: | + Endpoint config for this cluster + properties: + - !ruby/object:Api::Type::NestedObject + name: 'httpPorts' + description: | + Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'enableHttpPortAccess' + description: | + Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. + - !ruby/object:Api::Type::NestedObject + name: 'metastoreConfig' + description: | + Specifies a Metastore configuration. + properties: + - !ruby/object:Api::Type::String + name: 'dataprocMetastoreService' + description: | + Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name] + - !ruby/object:Api::Type::NestedObject + name: 'gkeClusterConfig' + description: | + The cluster's GKE config. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'namespacedGkeDeploymentTarget' + description: | + Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster. + properties: + - !ruby/object:Api::Type::String + name: 'targetGkeCluster' + description: | + Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::String + name: 'clusterNamespace' + description: | + Optional. A namespace within the GKE cluster to deploy into. + - !ruby/object:Api::Type::String + name: 'gkeClusterTarget' + description: | + Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::Array + name: 'nodePoolTarget' + description: | + Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. 
Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'nodePool' + description: | + Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}' + - !ruby/object:Api::Type::Array + name: 'roles' + description: | + Required. The roles associated with the GKE node pool. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodePoolConfig' + description: | + The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). + properties: + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Parameters that describe cluster nodes. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types). + - !ruby/object:Api::Type::Integer + name: 'localSsdCount' + description: | + Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)). + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. 
A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorCount' + description: | + The number of accelerator cards exposed to an instance. + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + The accelerator type resource name (see GPUs on Compute Engine). + - !ruby/object:Api::Type::String + name: 'gpuPartitionSize' + description: | + Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge". + - !ruby/object:Api::Type::String + name: 'bootDiskKmsKey' + description: | + Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} + - !ruby/object:Api::Type::Boolean + name: 'spot' + description: | + Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). 
+ - !ruby/object:Api::Type::Array + name: 'locations' + description: | + Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'autoscaling' + description: | + GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage. + properties: + - !ruby/object:Api::Type::Integer + name: 'minNodeCount' + description: | + The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count. + - !ruby/object:Api::Type::Integer + name: 'maxNodeCount' + description: | + The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster. + - !ruby/object:Api::Type::NestedObject + name: 'dataprocMetricConfig' + description: | + Dataproc metric config. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Required. Metrics sources to enable. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'metricSource' + description: | + Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information). 
+ values: + - :METRIC_SOURCE_UNSPECIFIED + - :MONITORING_AGENT_DEFAULTS + - :HDFS + - :SPARK + - :YARN + - :SPARK_HISTORY_SERVER + - :HIVESERVER2 + - :HIVEMETASTORE + - :FLINK + - !ruby/object:Api::Type::Array + name: 'metricOverrides' + description: | + Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source (any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'auxiliaryNodeGroups' + description: | + Optional. The node group settings. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroup' + description: | + Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The Node group resource name (https://aip.dev/122). + - !ruby/object:Api::Type::Array + name: 'roles' + description: | + Required. Node group roles. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. 
Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). + - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. 
+ - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. 
The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'nodeGroupId' + description: | + Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'clusterSelector' + description: | + A selector that chooses target cluster for jobs based on metadata. + properties: + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Optional. The zone where workflow process executes. 
This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used. + - !ruby/object:Api::Type::NestedObject + name: 'clusterLabels' + description: | + Required. The cluster labels. Cluster must have all labels to match. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jobs' + description: | + Required. The Directed Acyclic Graph of Jobs to submit. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'stepId' + description: | + Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. + - !ruby/object:Api::Type::NestedObject + name: 'hadoopJob' + description: | + A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris. 
+ - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkJob' + description: | + A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pysparkJob' + description: | + A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainPythonFileUri' + description: | + Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'pythonFileUris' + description: | + Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'hiveJob' + description: | + A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains Hive queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'pigJob' + description: | + A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains the Pig queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. 
You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkRJob' + description: | + A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainRFileUri' + description: | + Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkSqlJob' + description: | + A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'prestoJob' + description: | + A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. 
Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Presto client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'trinoJob' + description: | + A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Trino client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Trino CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'flinkJob' + description: | + A Dataproc job for running Apache Flink applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'savepointUri' + description: | + Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Job scheduling options. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxFailuresPerHour' + description: | + Optional. Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Integer + name: 'maxFailuresTotal' + description: | + Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. 
After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Array + name: 'prerequisiteStepIds' + description: | + Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'parameters' + description: | + Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. + - !ruby/object:Api::Type::Array + name: 'fields' + description: | + Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. 
For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels'key' placement.clusterSelector.clusterLabels'key' placement.managedCluster.labels'key' placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by step-id: jobs'step-id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri jobs'step-id'.pySparkJob.mainPythonFileUri jobs'step-id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0 jobs'step-id'.hadoopJob.fileUris0 jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 Other examples: jobs'step-id'.hadoopJob.properties'key' jobs'step-id'.hadoopJob.args0 jobs'step-id'.hiveJob.scriptVariables'key' jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Brief description of the parameter. Must not exceed 1024 characters. + - !ruby/object:Api::Type::NestedObject + name: 'validation' + description: | + Configuration for parameter validation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'regex' + description: | + Validation based on regular expressions. + properties: + - !ruby/object:Api::Type::Array + name: 'regexes' + description: | + Required. RE2 regular expressions used to validate the parameter's value. 
The value must match the regex in its entirety (substring matches are not sufficient). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'values' + description: | + Validation based on a list of allowed values. + properties: + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Required. List of allowed values for the parameter. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'dagTimeout' + description: | + Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for encrypting workflow template job arguments. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. 
The Cloud KMS key name to use for encrypting workflow template job arguments.When this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + + + + + - !ruby/object:Api::Resource + name: ProjectLocationWorkflowTemplate + base_url: '{{+parent}}/workflowTemplates' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + 
message: 'message' + description: |- + A Dataproc workflow template resource. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + - !ruby/object:Api::Type::Integer + name: 'version' + description: | + Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time template was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time template was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this template. 
These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'placement' + description: | + Specifies workflow execution target.Either managed_cluster or cluster_selector is required. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'managedCluster' + description: | + Cluster that is managed by the workflow. + properties: + - !ruby/object:Api::Type::String + name: 'clusterName' + description: | + Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + The cluster config. + properties: + - !ruby/object:Api::Type::String + name: 'configBucket' + description: | + Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. 
+ - !ruby/object:Api::Type::String + name: 'tempBucket' + description: | + Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - !ruby/object:Api::Type::NestedObject + name: 'gceClusterConfig' + description: | + Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'zoneUri' + description: | + Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone] + - !ruby/object:Api::Type::String + name: 'networkUri' + description: | + Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default + - !ruby/object:Api::Type::String + name: 'subnetworkUri' + description: | + Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0 + - !ruby/object:Api::Type::Boolean + name: 'internalIpOnly' + description: | + Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Google Private Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM. + - !ruby/object:Api::Type::Enum + name: 'privateIpv6GoogleAccess' + description: | + Optional. The type of IPv6 access for a cluster. + values: + - :PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED + - :INHERIT_FROM_SUBNETWORK + - :OUTBOUND + - :BIDIRECTIONAL + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. 
The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. + - !ruby/object:Api::Type::Array + name: 'serviceAccountScopes' + description: | + Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'reservationAffinity' + description: | + Reservation Affinity for consuming Zonal reservation. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'consumeReservationType' + description: | + Optional. Type of reservation to consume + values: + - :TYPE_UNSPECIFIED + - :NO_RESERVATION + - :ANY_RESERVATION + - :SPECIFIC_RESERVATION + - !ruby/object:Api::Type::String + name: 'key' + description: | + Optional. Corresponds to the label key of reservation resource. + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Optional. Corresponds to the label values of reservation resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupAffinity' + description: | + Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource. + properties: + - !ruby/object:Api::Type::String + name: 'nodeGroupUri' + description: | + Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1 + - !ruby/object:Api::Type::NestedObject + name: 'shieldedInstanceConfig' + description: | + Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm). + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableSecureBoot' + description: | + Optional. Defines whether instances have Secure Boot enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableVtpm' + description: | + Optional. Defines whether instances have the vTPM enabled. + - !ruby/object:Api::Type::Boolean + name: 'enableIntegrityMonitoring' + description: | + Optional. Defines whether instances have integrity monitoring enabled. 
+ - !ruby/object:Api::Type::NestedObject + name: 'confidentialInstanceConfig' + description: | + Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs) + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableConfidentialCompute' + description: | + Optional. Defines whether the instance should have confidential compute enabled. + - !ruby/object:Api::Type::NestedObject + name: 'masterConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'workerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'secondaryWorkerConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. 
The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). 
+ - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. 
The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. 
The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. + - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VMs provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. 
The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'softwareConfig' + description: | + Specifies the selection and config of software inside the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'imageVersion' + description: | + Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'optionalComponents' + description: | + Optional. The set of components to activate on the cluster. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'initializationActions' + description: | + Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'executableFile' + description: | + Required. Cloud Storage URI of executable file. + - !ruby/object:Api::Type::String + name: 'executionTimeout' + description: | + Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'gcePdKmsKeyName' + description: | + Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information. + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. 
See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the following job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Autoscaling Policy config associated with the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'policyUri' + description: | + Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region. + - !ruby/object:Api::Type::NestedObject + name: 'securityConfig' + description: | + Security related configuration, including encryption, Kerberos, etc. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Specifies Kerberos related configuration. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enableKerberos' + description: | + Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. + - !ruby/object:Api::Type::String + name: 'rootPrincipalPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. + - !ruby/object:Api::Type::String + name: 'kmsKeyUri' + description: | + Optional. The URI of the KMS key used to encrypt sensitive files. + - !ruby/object:Api::Type::String + name: 'keystoreUri' + description: | + Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'truststoreUri' + description: | + Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + - !ruby/object:Api::Type::String + name: 'keystorePasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'keyPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. + - !ruby/object:Api::Type::String + name: 'truststorePasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. 
+ - !ruby/object:Api::Type::String + name: 'crossRealmTrustRealm' + description: | + Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustKdc' + description: | + Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustAdminServer' + description: | + Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'crossRealmTrustSharedPasswordUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. + - !ruby/object:Api::Type::String + name: 'kdcDbKeyUri' + description: | + Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. + - !ruby/object:Api::Type::Integer + name: 'tgtLifetimeHours' + description: | + Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. + - !ruby/object:Api::Type::String + name: 'realm' + description: | + Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. + - !ruby/object:Api::Type::NestedObject + name: 'identityConfig' + description: | + Identity related configuration, including service account based secure multi-tenancy user mappings. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'userServiceAccountMapping' + description: | + Required. Map of user to service account. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'lifecycleConfig' + description: | + Specifies the cluster auto-delete schedule configuration. + properties: + - !ruby/object:Api::Type::String + name: 'idleDeleteTtl' + description: | + Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTime' + description: | + Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'autoDeleteTtl' + description: | + Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::String + name: 'idleStartTime' + description: | + Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)). + - !ruby/object:Api::Type::NestedObject + name: 'endpointConfig' + description: | + Endpoint config for this cluster + properties: + - !ruby/object:Api::Type::NestedObject + name: 'httpPorts' + description: | + Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'enableHttpPortAccess' + description: | + Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. + - !ruby/object:Api::Type::NestedObject + name: 'metastoreConfig' + description: | + Specifies a Metastore configuration. + properties: + - !ruby/object:Api::Type::String + name: 'dataprocMetastoreService' + description: | + Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name] + - !ruby/object:Api::Type::NestedObject + name: 'gkeClusterConfig' + description: | + The cluster's GKE config. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'namespacedGkeDeploymentTarget' + description: | + Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster. + properties: + - !ruby/object:Api::Type::String + name: 'targetGkeCluster' + description: | + Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::String + name: 'clusterNamespace' + description: | + Optional. A namespace within the GKE cluster to deploy into. + - !ruby/object:Api::Type::String + name: 'gkeClusterTarget' + description: | + Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + - !ruby/object:Api::Type::Array + name: 'nodePoolTarget' + description: | + Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. 
Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'nodePool' + description: | + Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}' + - !ruby/object:Api::Type::Array + name: 'roles' + description: | + Required. The roles associated with the GKE node pool. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodePoolConfig' + description: | + The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). + properties: + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + Parameters that describe cluster nodes. + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types). + - !ruby/object:Api::Type::Integer + name: 'localSsdCount' + description: | + Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)). + - !ruby/object:Api::Type::Boolean + name: 'preemptible' + description: | + Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. 
A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorCount' + description: | + The number of accelerator cards exposed to an instance. + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + The accelerator type resource name (see GPUs on Compute Engine). + - !ruby/object:Api::Type::String + name: 'gpuPartitionSize' + description: | + Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge". + - !ruby/object:Api::Type::String + name: 'bootDiskKmsKey' + description: | + Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} + - !ruby/object:Api::Type::Boolean + name: 'spot' + description: | + Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). 
+ - !ruby/object:Api::Type::Array + name: 'locations' + description: | + Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'autoscaling' + description: | + GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage. + properties: + - !ruby/object:Api::Type::Integer + name: 'minNodeCount' + description: | + The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count. + - !ruby/object:Api::Type::Integer + name: 'maxNodeCount' + description: | + The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster. + - !ruby/object:Api::Type::NestedObject + name: 'dataprocMetricConfig' + description: | + Dataproc metric config. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Required. Metrics sources to enable. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'metricSource' + description: | + Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information). 
+ values: + - :METRIC_SOURCE_UNSPECIFIED + - :MONITORING_AGENT_DEFAULTS + - :HDFS + - :SPARK + - :YARN + - :SPARK_HISTORY_SERVER + - :HIVESERVER2 + - :HIVEMETASTORE + - :FLINK + - !ruby/object:Api::Type::Array + name: 'metricOverrides' + description: | + Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric source (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK and YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'auxiliaryNodeGroups' + description: | + Optional. The node group settings. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroup' + description: | + Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The Node group resource name (https://aip.dev/122). + - !ruby/object:Api::Type::Array + name: 'roles' + description: | + Required. Node group roles. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'nodeGroupConfig' + description: | + The config settings for Compute Engine resources in an instance group, such as a master or worker group. + properties: + - !ruby/object:Api::Type::Integer + name: 'numInstances' + description: | + Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. + - !ruby/object:Api::Type::Array + name: 'instanceNames' + description: | + Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'instanceReferences' + description: | + Output only. List of references to Compute Engine instances. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'instanceName' + description: | + The user-friendly name of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'instanceId' + description: | + The unique identifier of the Compute Engine instance. + - !ruby/object:Api::Type::String + name: 'publicKey' + description: | + The public RSA key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'publicEciesKey' + description: | + The public ECIES key used for sharing data with this instance. + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. 
Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + - !ruby/object:Api::Type::String + name: 'machineTypeUri' + description: | + Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2. + - !ruby/object:Api::Type::NestedObject + name: 'diskConfig' + description: | + Specifies the config of disk options for a group of VM instances. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Optional. Size in GB of the boot disk (default is 500GB). + - !ruby/object:Api::Type::Integer + name: 'numLocalSsds' + description: | + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected. + - !ruby/object:Api::Type::String + name: 'localSsdInterface' + description: | + Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance). + - !ruby/object:Api::Type::Boolean + name: 'isPreemptible' + description: | + Output only. Specifies that this instance group contains preemptible instances. + - !ruby/object:Api::Type::Enum + name: 'preemptibility' + description: | + Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE. + values: + - :PREEMPTIBILITY_UNSPECIFIED + - :NON_PREEMPTIBLE + - :PREEMPTIBLE + - :SPOT + - !ruby/object:Api::Type::NestedObject + name: 'managedGroupConfig' + description: | + Specifies the resources used to actively manage an instance group. + properties: + - !ruby/object:Api::Type::String + name: 'instanceTemplateName' + description: | + Output only. The name of the Instance Template used for the Managed Instance Group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerName' + description: | + Output only. The name of the Instance Group Manager for this group. + - !ruby/object:Api::Type::String + name: 'instanceGroupManagerUri' + description: | + Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm. + - !ruby/object:Api::Type::Array + name: 'accelerators' + description: | + Optional. The Compute Engine accelerator configuration for these instances. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'acceleratorTypeUri' + description: | + Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of the accelerator cards of this type exposed to this instance. + - !ruby/object:Api::Type::String + name: 'minCpuPlatform' + description: | + Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + - !ruby/object:Api::Type::Integer + name: 'minNumInstances' + description: | + Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster is placed in an ERROR state. The failed VMs are not deleted. 
+ - !ruby/object:Api::Type::NestedObject + name: 'instanceFlexibilityPolicy' + description: | + Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + properties: + - !ruby/object:Api::Type::Array + name: 'instanceSelectionList' + description: | + Optional. List of instance selection options that the group will use when creating new VMs. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'machineTypes' + description: | + Optional. Full machine-type names, e.g. "n1-standard-16". + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'rank' + description: | + Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - !ruby/object:Api::Type::Array + name: 'instanceSelectionResults' + description: | + Output only. A list of instance selection results in the group. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Output only. Full machine-type names, e.g. "n1-standard-16". + - !ruby/object:Api::Type::Integer + name: 'vmCount' + description: | + Output only. Number of VM provisioned with the machine_type. + - !ruby/object:Api::Type::NestedObject + name: 'startupConfig' + description: | + Configuration to handle the startup of instances during cluster create and update process. + properties: + - !ruby/object:Api::Type::Integer + name: 'requiredRegistrationFraction' + description: | + Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. 
The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled). + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'nodeGroupId' + description: | + Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'clusterSelector' + description: | + A selector that chooses target cluster for jobs based on metadata. + properties: + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Optional. The zone where workflow process executes. 
This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used. + - !ruby/object:Api::Type::NestedObject + name: 'clusterLabels' + description: | + Required. The cluster labels. Cluster must have all labels to match. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jobs' + description: | + Required. The Directed Acyclic Graph of Jobs to submit. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'stepId' + description: | + Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. + - !ruby/object:Api::Type::NestedObject + name: 'hadoopJob' + description: | + A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris. 
+ - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkJob' + description: | + A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pysparkJob' + description: | + A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainPythonFileUri' + description: | + Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'pythonFileUris' + description: | + Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'hiveJob' + description: | + A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains Hive queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'pigJob' + description: | + A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains the Pig queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. 
You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkRJob' + description: | + A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainRFileUri' + description: | + Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkSqlJob' + description: | + A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'prestoJob' + description: | + A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. 
Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Presto client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'trinoJob' + description: | + A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Trino client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Trino CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'flinkJob' + description: | + A Dataproc job for running Apache Flink applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'savepointUri' + description: | + Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a given job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Job scheduling options. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxFailuresPerHour' + description: | + Optional. Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Integer + name: 'maxFailuresTotal' + description: | + Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. 
After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Array + name: 'prerequisiteStepIds' + description: | + Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'parameters' + description: | + Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. + - !ruby/object:Api::Type::Array + name: 'fields' + description: | + Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. 
For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels'key' placement.clusterSelector.clusterLabels'key' placement.managedCluster.labels'key' placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by step-id: jobs'step-id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri jobs'step-id'.pySparkJob.mainPythonFileUri jobs'step-id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0 jobs'step-id'.hadoopJob.fileUris0 jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 Other examples: jobs'step-id'.hadoopJob.properties'key' jobs'step-id'.hadoopJob.args0 jobs'step-id'.hiveJob.scriptVariables'key' jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Brief description of the parameter. Must not exceed 1024 characters. + - !ruby/object:Api::Type::NestedObject + name: 'validation' + description: | + Configuration for parameter validation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'regex' + description: | + Validation based on regular expressions. + properties: + - !ruby/object:Api::Type::Array + name: 'regexes' + description: | + Required. RE2 regular expressions used to validate the parameter's value. 
The value must match the regex in its entirety (substring matches are not sufficient). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'values' + description: | + Validation based on a list of allowed values. + properties: + - !ruby/object:Api::Type::Array + name: 'values' + description: | + Required. List of allowed values for the parameter. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'dagTimeout' + description: | + Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for encrypting workflow template job arguments. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. 
The Cloud KMS key name to use for encrypting workflow template job arguments.When this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries + + + + +  - !ruby/object:Api::Resource +    name: ProjectRegionJob +    base_url: 'projects/{{projectId}}/regions/{{region}}/jobs' +    self_link: 'projects/{{projectId}}/regions/{{region}}/jobs/{{jobId}}' +    references: !ruby/object:Api::Resource::ReferenceLinks +      guides: +        'Official Documentation': +      api: 'https://cloud.google.com/dataproc/docs' +    async: !ruby/object:Api::OpAsync +      operation: !ruby/object:Api::OpAsync::Operation +        path: 'name' +        base_url: '{op_id}' +        wait_ms: 1000 +      result: !ruby/object:Api::OpAsync::Result +        path: 'response' +        resource_inside_response: true +      status: !ruby/object:Api::OpAsync::Status +        path: 'done' +        complete: True +        allowed: +          - True +          - False +      error:
!ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A Dataproc job resource. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'reference' + description: | + Encapsulates the full scoping used to reference a job. + properties: + - !ruby/object:Api::Type::String + name: 'projectId' + description: | + Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID. + - !ruby/object:Api::Type::String + name: 'jobId' + description: | + Optional. The job ID, which must be unique within the project.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.If not specified by the caller, the job ID will be provided by the server. + - !ruby/object:Api::Type::NestedObject + name: 'placement' + description: | + Dataproc job config. + properties: + - !ruby/object:Api::Type::String + name: 'clusterName' + description: | + Required. The name of the cluster where the job will be submitted. + - !ruby/object:Api::Type::String + name: 'clusterUuid' + description: | + Output only. A cluster UUID generated by the Dataproc service when the job is submitted. + - !ruby/object:Api::Type::NestedObject + name: 'clusterLabels' + description: | + Optional. Cluster labels to identify a cluster where the job will be submitted. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'hadoopJob' + description: | + A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). 
+ properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkJob' + description: | + A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. 
HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'pysparkJob' + description: | + A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainPythonFileUri' + description: | + Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'pythonFileUris' + description: | + Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'hiveJob' + description: | + A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains Hive queries. 
+ - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'pigJob' + description: | + A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN. 
+ properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains the Pig queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkRJob' + description: | + A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainRFileUri' + description: | + Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'sparkSqlJob' + description: | + A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'scriptVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. 
A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API might be overwritten. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'prestoJob' + description: | + A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Presto client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'trinoJob' + description: | + A Dataproc job for running Trino (https://trino.io/) queries. 
IMPORTANT: The Dataproc Trino Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be enabled when the cluster is created to submit a Trino job to the cluster. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + The HCFS URI of the script that contains SQL queries. + - !ruby/object:Api::Type::NestedObject + name: 'queryList' + description: | + A list of queries to run on a cluster. + properties: + - !ruby/object:Api::Type::Array + name: 'queries' + description: | + Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'continueOnFailure' + description: | + Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + - !ruby/object:Api::Type::String + name: 'outputFormat' + description: | + Optional. The format in which query output will be displayed. See the Trino documentation for supported output formats + - !ruby/object:Api::Type::Array + name: 'clientTags' + description: | + Optional. Trino client tags to attach to this query + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values. 
Used to set Trino session properties (https://trino.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Trino CLI + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'flinkJob' + description: | + A Dataproc job for running Apache Flink applications on YARN. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'savepointUri' + description: | + Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. 
+ - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'loggingConfig' + description: | + The runtime logging config of the job. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'driverLogLevels' + description: | + The per-package log levels for the driver. This can include "root" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'status' + description: | + Dataproc job status. + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. A state message specifying the overall job state. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :SETUP_DONE + - :RUNNING + - :CANCEL_PENDING + - :CANCEL_STARTED + - :CANCELLED + - :DONE + - :ERROR + - :ATTEMPT_FAILURE + - !ruby/object:Api::Type::String + name: 'details' + description: | + Optional. Output only. Job state details, such as an error description if the state is ERROR. + - !ruby/object:Api::Type::String + name: 'stateStartTime' + description: | + Output only. The time when this state was entered. + - !ruby/object:Api::Type::Enum + name: 'substate' + description: | + Output only. Additional state information, which includes status reported by the agent. + values: + - :UNSPECIFIED + - :SUBMITTED + - :QUEUED + - :STALE_STATUS + - !ruby/object:Api::Type::Array + name: 'statusHistory' + description: | + Output only. The previous job status. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. A state message specifying the overall job state. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :SETUP_DONE + - :RUNNING + - :CANCEL_PENDING + - :CANCEL_STARTED + - :CANCELLED + - :DONE + - :ERROR + - :ATTEMPT_FAILURE + - !ruby/object:Api::Type::String + name: 'details' + description: | + Optional. Output only. Job state details, such as an error description if the state is ERROR. + - !ruby/object:Api::Type::String + name: 'stateStartTime' + description: | + Output only. The time when this state was entered. + - !ruby/object:Api::Type::Enum + name: 'substate' + description: | + Output only. Additional state information, which includes status reported by the agent. + values: + - :UNSPECIFIED + - :SUBMITTED + - :QUEUED + - :STALE_STATUS + - !ruby/object:Api::Type::Array + name: 'yarnApplications' + description: | + Output only. The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It might be changed before final release. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. The application name. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Required. The application state. + values: + - :STATE_UNSPECIFIED + - :NEW + - :NEW_SAVING + - :SUBMITTED + - :ACCEPTED + - :RUNNING + - :FINISHED + - :FAILED + - :KILLED + - !ruby/object:Api::Type::Integer + name: 'progress' + description: | + Required. The numerical progress of the application, from 1 to 100. + - !ruby/object:Api::Type::String + name: 'trackingUrl' + description: | + Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. 
The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access. + - !ruby/object:Api::Type::String + name: 'driverOutputResourceUri' + description: | + Output only. A URI pointing to the location of the stdout of the job's driver program. + - !ruby/object:Api::Type::String + name: 'driverControlFilesUri' + description: | + Output only. If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + Job scheduling options. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxFailuresPerHour' + description: | + Optional. Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::Integer + name: 'maxFailuresTotal' + description: | + Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. 
After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + - !ruby/object:Api::Type::String + name: 'jobUuid' + description: | + Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time. + - !ruby/object:Api::Type::Boolean + name: 'done' + description: | + Output only. Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled. + - !ruby/object:Api::Type::NestedObject + name: 'driverSchedulingConfig' + description: | + Driver scheduling configuration. + properties: + - !ruby/object:Api::Type::Integer + name: 'memoryMb' + description: | + Required. The amount of memory in MB the driver is requesting. + - !ruby/object:Api::Type::Integer + name: 'vcores' + description: | + Required. The number of vCPUs the driver is requesting. 
+ + + + + - !ruby/object:Api::Resource + name: ProjectRegionAutoscalingPolicy + base_url: '{{+parent}}/autoscalingPolicies' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Describes an autoscaling policy for Dataproc cluster autoscaler. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id} + - !ruby/object:Api::Type::NestedObject + name: 'basicAlgorithm' + description: | + Basic algorithm for autoscaling. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'yarnConfig' + description: | + Basic autoscaling configurations for YARN. 
+ properties: + - !ruby/object:Api::Type::String + name: 'gracefulDecommissionTimeout' + description: | + Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d. + - !ruby/object:Api::Type::Integer + name: 'scaleUpFactor' + description: | + Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownFactor' + description: | + Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleUpMinWorkerFraction' + description: | + Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. 
Default: 0.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownMinWorkerFraction' + description: | + Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. + - !ruby/object:Api::Type::NestedObject + name: 'sparkStandaloneConfig' + description: | + Basic autoscaling configurations for Spark Standalone. + properties: + - !ruby/object:Api::Type::String + name: 'gracefulDecommissionTimeout' + description: | + Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d. + - !ruby/object:Api::Type::Integer + name: 'scaleUpFactor' + description: | + Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownFactor' + description: | + Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job.(more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleUpMinWorkerFraction' + description: | + Optional. 
Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownMinWorkerFraction' + description: | + Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. + - !ruby/object:Api::Type::Boolean + name: 'removeOnlyIdleWorkers' + description: | + Optional. Remove only idle workers when scaling down cluster + - !ruby/object:Api::Type::String + name: 'cooldownPeriod' + description: | + Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m. + - !ruby/object:Api::Type::NestedObject + name: 'workerConfig' + description: | + Configuration for the size bounds of an instance group, including its proportional size to other groups. + properties: + - !ruby/object:Api::Type::Integer + name: 'minInstances' + description: | + Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0. + - !ruby/object:Api::Type::Integer + name: 'maxInstances' + description: | + Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). 
Secondary workers - Bounds: [min_instances, ). Default: 0. + - !ruby/object:Api::Type::Integer + name: 'weight' + description: | + Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. + - !ruby/object:Api::Type::NestedObject + name: 'secondaryWorkerConfig' + description: | + Configuration for the size bounds of an instance group, including its proportional size to other groups. + properties: + - !ruby/object:Api::Type::Integer + name: 'minInstances' + description: | + Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0. + - !ruby/object:Api::Type::Integer + name: 'maxInstances' + description: | + Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). 
Default: 0. + - !ruby/object:Api::Type::Integer + name: 'weight' + description: | + Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + + - !ruby/object:Api::Resource + name: ProjectRegionAutoscalingPolicy + base_url: '{{+parent}}/autoscalingPolicies' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Describes an autoscaling policy for Dataproc cluster autoscaler. + properties: + + - !ruby/object:Api::Type::String + name: 'id' + description: | + Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id} + - !ruby/object:Api::Type::NestedObject + name: 'basicAlgorithm' + description: | + Basic algorithm for autoscaling. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'yarnConfig' + description: | + Basic autoscaling configurations for YARN. 
+ properties: + - !ruby/object:Api::Type::String + name: 'gracefulDecommissionTimeout' + description: | + Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d. + - !ruby/object:Api::Type::Integer + name: 'scaleUpFactor' + description: | + Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownFactor' + description: | + Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleUpMinWorkerFraction' + description: | + Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. 
Default: 0.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownMinWorkerFraction' + description: | + Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. + - !ruby/object:Api::Type::NestedObject + name: 'sparkStandaloneConfig' + description: | + Basic autoscaling configurations for Spark Standalone. + properties: + - !ruby/object:Api::Type::String + name: 'gracefulDecommissionTimeout' + description: | + Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d. + - !ruby/object:Api::Type::Integer + name: 'scaleUpFactor' + description: | + Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownFactor' + description: | + Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job.(more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0. + - !ruby/object:Api::Type::Integer + name: 'scaleUpMinWorkerFraction' + description: | + Optional. 
Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. + - !ruby/object:Api::Type::Integer + name: 'scaleDownMinWorkerFraction' + description: | + Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. + - !ruby/object:Api::Type::Boolean + name: 'removeOnlyIdleWorkers' + description: | + Optional. Remove only idle workers when scaling down cluster + - !ruby/object:Api::Type::String + name: 'cooldownPeriod' + description: | + Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m. + - !ruby/object:Api::Type::NestedObject + name: 'workerConfig' + description: | + Configuration for the size bounds of an instance group, including its proportional size to other groups. + properties: + - !ruby/object:Api::Type::Integer + name: 'minInstances' + description: | + Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0. + - !ruby/object:Api::Type::Integer + name: 'maxInstances' + description: | + Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). 
Secondary workers - Bounds: [min_instances, ). Default: 0. + - !ruby/object:Api::Type::Integer + name: 'weight' + description: | + Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. + - !ruby/object:Api::Type::NestedObject + name: 'secondaryWorkerConfig' + description: | + Configuration for the size bounds of an instance group, including its proportional size to other groups. + properties: + - !ruby/object:Api::Type::Integer + name: 'minInstances' + description: | + Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0. + - !ruby/object:Api::Type::Integer + name: 'maxInstances' + description: | + Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). 
Default: 0. + - !ruby/object:Api::Type::Integer + name: 'weight' + description: | + Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + + - !ruby/object:Api::Resource + name: ProjectLocationSession + base_url: '{{+parent}}/sessions' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A representation of a session. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. The resource name of the session. + - !ruby/object:Api::Type::String + name: 'uuid' + description: | + Output only. A session UUID (Unique Universal Identifier). The service generates this value when it creates the session. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the session was created. + - !ruby/object:Api::Type::NestedObject + name: 'jupyterSession' + description: | + Jupyter configuration for an interactive session. + properties: + - !ruby/object:Api::Type::Enum + name: 'kernel' + description: | + Optional. Kernel + values: + - :KERNEL_UNSPECIFIED + - :PYTHON + - :SCALA + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Optional. Display name, shown in the Jupyter kernelspec card. + - !ruby/object:Api::Type::NestedObject + name: 'runtimeInfo' + description: | + Runtime information about workload execution. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'endpoints' + description: | + Output only. 
Map of remote access endpoints (such as web interfaces and APIs) to their URIs. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Output only. A URI pointing to the location of the stdout and stderr of the workload. + - !ruby/object:Api::Type::String + name: 'diagnosticOutputUri' + description: | + Output only. A URI pointing to the location of the diagnostics tarball. + - !ruby/object:Api::Type::NestedObject + name: 'approximateUsage' + description: | + Usage metrics represent approximate total resources consumed by a workload. + properties: + - !ruby/object:Api::Type::String + name: 'milliDcuSeconds' + description: | + Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'shuffleStorageGbSeconds' + description: | + Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'milliAcceleratorSeconds' + description: | + Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Optional. Accelerator type being used, if any + - !ruby/object:Api::Type::NestedObject + name: 'currentUsage' + description: | + The usage snapshot represents the resources consumed by a workload at a specified time. + properties: + - !ruby/object:Api::Type::String + name: 'milliDcu' + description: | + Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). 
+ - !ruby/object:Api::Type::String + name: 'shuffleStorageGb' + description: | + Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) + - !ruby/object:Api::Type::String + name: 'milliDcuPremium' + description: | + Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'shuffleStorageGbPremium' + description: | + Optional. Shuffle Storage in gigabytes (GB) charged at premium tier. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) + - !ruby/object:Api::Type::String + name: 'milliAccelerator' + description: | + Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Optional. Accelerator type being used, if any + - !ruby/object:Api::Type::String + name: 'snapshotTime' + description: | + Optional. The timestamp of the usage snapshot. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. A state of the session. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :TERMINATING + - :TERMINATED + - :FAILED + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Session state details, such as the failure description if the state is FAILED. + - !ruby/object:Api::Type::String + name: 'stateTime' + description: | + Output only. The time when the session entered the current state. + - !ruby/object:Api::Type::String + name: 'creator' + description: | + Output only. The email address of the user who created the session. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with the session. 
Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'runtimeConfig' + description: | + Runtime configuration for a workload. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + Optional. Version of the batch runtime. + - !ruby/object:Api::Type::String + name: 'containerImage' + description: | + Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, which are used to configure workload execution. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'repositoryConfig' + description: | + Configuration for dependency repositories + properties: + - !ruby/object:Api::Type::NestedObject + name: 'pypiRepositoryConfig' + description: | + Configuration for PyPi repository + properties: + - !ruby/object:Api::Type::String + name: 'pypiRepository' + description: | + Optional. PyPi repository address + - !ruby/object:Api::Type::NestedObject + name: 'environmentConfig' + description: | + Environment configuration for a workload. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'executionConfig' + description: | + Execution configuration for a workload. + properties: + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. Service account that used to execute workload. 
+ - !ruby/object:Api::Type::String + name: 'networkUri' + description: | + Optional. Network URI to connect workload to. + - !ruby/object:Api::Type::String + name: 'subnetworkUri' + description: | + Optional. Subnetwork URI to connect workload to. + - !ruby/object:Api::Type::Array + name: 'networkTags' + description: | + Optional. Tags used for network traffic control. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. The Cloud KMS key to use for encryption. + - !ruby/object:Api::Type::String + name: 'idleTtl' + description: | + Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first. + - !ruby/object:Api::Type::String + name: 'ttl' + description: | + Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. 
If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first. + - !ruby/object:Api::Type::String + name: 'stagingBucket' + description: | + Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - !ruby/object:Api::Type::NestedObject + name: 'peripheralsConfig' + description: | + Auxiliary services configuration for a workload. + properties: + - !ruby/object:Api::Type::String + name: 'metastoreService' + description: | + Optional. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[region]/services/[service_id] + - !ruby/object:Api::Type::NestedObject + name: 'sparkHistoryServerConfig' + description: | + Spark History Server configuration for the workload. + properties: + - !ruby/object:Api::Type::String + name: 'dataprocCluster' + description: | + Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.Example: projects/[project_id]/regions/[region]/clusters/[cluster_name] + - !ruby/object:Api::Type::String + name: 'user' + description: | + Optional. The email address of the user who owns the session. + - !ruby/object:Api::Type::Array + name: 'stateHistory' + description: | + Output only. Historical state information for the session. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the session at this point in the session history. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :TERMINATING + - :TERMINATED + - :FAILED + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Details about the state at this point in the session history. + - !ruby/object:Api::Type::String + name: 'stateStartTime' + description: | + Output only. The time when the session entered the historical state. + - !ruby/object:Api::Type::String + name: 'sessionTemplate' + description: | + Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session. + + + + + - !ruby/object:Api::Resource + name: Batch + base_url: '{{+parent}}/batches' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/dataproc/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A representation of a batch workload in the service. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the batch. 
+ - !ruby/object:Api::Type::String + name: 'uuid' + description: | + Output only. A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the batch was created. + - !ruby/object:Api::Type::NestedObject + name: 'pysparkBatch' + description: | + A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload. + properties: + - !ruby/object:Api::Type::String + name: 'mainPythonFileUri' + description: | + Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'pythonFileUris' + description: | + Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'sparkBatch' + description: | + A configuration for running an Apache Spark (https://spark.apache.org/) batch workload. + properties: + - !ruby/object:Api::Type::String + name: 'mainJarFileUri' + description: | + Optional. The HCFS URI of the jar file that contains the main class. + - !ruby/object:Api::Type::String + name: 'mainClass' + description: | + Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'sparkRBatch' + description: | + A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload. + properties: + - !ruby/object:Api::Type::String + name: 'mainRFileUri' + description: | + Required. The HCFS URI of the main R file to use as the driver. Must be a .R or .r file. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Optional. 
The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'fileUris' + description: | + Optional. HCFS URIs of files to be placed in the working directory of each executor. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'archiveUris' + description: | + Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'sparkSqlBatch' + description: | + A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload. + properties: + - !ruby/object:Api::Type::String + name: 'queryFileUri' + description: | + Required. The HCFS URI of the script that contains Spark SQL queries to execute. + - !ruby/object:Api::Type::NestedObject + name: 'queryVariables' + description: | + Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Array + name: 'jarFileUris' + description: | + Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'runtimeInfo' + description: | + Runtime information about workload execution. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'endpoints' + description: | + Output only. Map of remote access endpoints (such as web interfaces and APIs) to their URIs. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Output only. A URI pointing to the location of the stdout and stderr of the workload. + - !ruby/object:Api::Type::String + name: 'diagnosticOutputUri' + description: | + Output only. A URI pointing to the location of the diagnostics tarball. + - !ruby/object:Api::Type::NestedObject + name: 'approximateUsage' + description: | + Usage metrics represent approximate total resources consumed by a workload. + properties: + - !ruby/object:Api::Type::String + name: 'milliDcuSeconds' + description: | + Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'shuffleStorageGbSeconds' + description: | + Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'milliAcceleratorSeconds' + description: | + Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Optional. Accelerator type being used, if any + - !ruby/object:Api::Type::NestedObject + name: 'currentUsage' + description: | + The usage snapshot represents the resources consumed by a workload at a specified time. + properties: + - !ruby/object:Api::Type::String + name: 'milliDcu' + description: | + Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'shuffleStorageGb' + description: | + Optional. Shuffle Storage in gigabytes (GB). 
(see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) + - !ruby/object:Api::Type::String + name: 'milliDcuPremium' + description: | + Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). + - !ruby/object:Api::Type::String + name: 'shuffleStorageGbPremium' + description: | + Optional. Shuffle Storage in gigabytes (GB) charged at premium tier. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) + - !ruby/object:Api::Type::String + name: 'milliAccelerator' + description: | + Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) + - !ruby/object:Api::Type::String + name: 'acceleratorType' + description: | + Optional. Accelerator type being used, if any + - !ruby/object:Api::Type::String + name: 'snapshotTime' + description: | + Optional. The timestamp of the usage snapshot. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the batch. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :RUNNING + - :CANCELLING + - :CANCELLED + - :SUCCEEDED + - :FAILED + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Batch state details, such as a failure description if the state is FAILED. + - !ruby/object:Api::Type::String + name: 'stateTime' + description: | + Output only. The time when the batch entered a current state. + - !ruby/object:Api::Type::String + name: 'creator' + description: | + Output only. The email address of the user who created the batch. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'runtimeConfig' + description: | + Runtime configuration for a workload. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + Optional. Version of the batch runtime. + - !ruby/object:Api::Type::String + name: 'containerImage' + description: | + Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. + - !ruby/object:Api::Type::NestedObject + name: 'properties' + description: | + Optional. A mapping of property names to values, which are used to configure workload execution. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'repositoryConfig' + description: | + Configuration for dependency repositories + properties: + - !ruby/object:Api::Type::NestedObject + name: 'pypiRepositoryConfig' + description: | + Configuration for PyPi repository + properties: + - !ruby/object:Api::Type::String + name: 'pypiRepository' + description: | + Optional. PyPi repository address + - !ruby/object:Api::Type::NestedObject + name: 'environmentConfig' + description: | + Environment configuration for a workload. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'executionConfig' + description: | + Execution configuration for a workload. + properties: + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. Service account that used to execute workload. + - !ruby/object:Api::Type::String + name: 'networkUri' + description: | + Optional. Network URI to connect workload to. 
+ - !ruby/object:Api::Type::String + name: 'subnetworkUri' + description: | + Optional. Subnetwork URI to connect workload to. + - !ruby/object:Api::Type::Array + name: 'networkTags' + description: | + Optional. Tags used for network traffic control. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Optional. The Cloud KMS key to use for encryption. + - !ruby/object:Api::Type::String + name: 'idleTtl' + description: | + Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first. + - !ruby/object:Api::Type::String + name: 'ttl' + description: | + Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. 
If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first. + - !ruby/object:Api::Type::String + name: 'stagingBucket' + description: | + Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + - !ruby/object:Api::Type::NestedObject + name: 'peripheralsConfig' + description: | + Auxiliary services configuration for a workload. + properties: + - !ruby/object:Api::Type::String + name: 'metastoreService' + description: | + Optional. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[region]/services/[service_id] + - !ruby/object:Api::Type::NestedObject + name: 'sparkHistoryServerConfig' + description: | + Spark History Server configuration for the workload. + properties: + - !ruby/object:Api::Type::String + name: 'dataprocCluster' + description: | + Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.Example: projects/[project_id]/regions/[region]/clusters/[cluster_name] + - !ruby/object:Api::Type::String + name: 'operation' + description: | + Output only. The resource name of the operation associated with this batch. + - !ruby/object:Api::Type::Array + name: 'stateHistory' + description: | + Output only. Historical state information for the batch. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the batch at this point in history. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :RUNNING + - :CANCELLING + - :CANCELLED + - :SUCCEEDED + - :FAILED + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Details about the state at this point in history. + - !ruby/object:Api::Type::String + name: 'stateStartTime' + description: | + Output only. The time when the batch entered the historical state. + diff --git a/mmv1/products/dlp/api.yaml b/mmv1/products/dlp/api.yaml index 129513aaa..f1f6debe2 100644 --- a/mmv1/products/dlp/api.yaml +++ b/mmv1/products/dlp/api.yaml @@ -12,7 +12,7 @@ # limitations under the License. --- !ruby/object:Api::Product -name: DataLossPrevention +name: DLP versions: - !ruby/object:Api::Product::Version name: ga @@ -39,7 +39,7 @@ objects: name: 'parent' description: | The parent of the trigger, either in the format `projects/{{project}}` - or `projects/{{project}}/locations/{{location}}` + or `projects/{{project}}/locations/{{location}}` required: true input: true url_param_only: true @@ -47,7 +47,7 @@ objects: - !ruby/object:Api::Type::String name: 'name' description: | - The resource name of the job trigger. Set by the server. + The resource name of the job trigger.Set by the server. output: true - !ruby/object:Api::Type::String name: 'description' @@ -891,14 +891,14 @@ objects: description: | Name describing the field. - !ruby/object:Api::Resource - name: 'DeidentifyTemplate' + name: 'DT' create_url: "{{parent}}/deidentifyTemplates" self_link: "{{parent}}/deidentifyTemplates/{{name}}" base_url: "{{parent}}/deidentifyTemplates" update_verb: :PATCH update_mask: true description: | - Allows creation of templates to de-identify content. + DeidentifyTemplate - Allows creation of templates to de-identify content. 
references: !ruby/object:Api::Resource::ReferenceLinks guides: 'Official Documentation': @@ -932,20 +932,20 @@ objects: description: | User set display name of the template. - !ruby/object:Api::Type::NestedObject - name: 'deidentifyConfig' + name: 'config' required: true - description: Configuration of the deidentify template + description: deidentifyConfig - Configuration of the deidentify template properties: - !ruby/object:Api::Type::NestedObject - name: 'infoTypeTransformations' + name: 'infoTypeTf' required: true - description: Specifies free-text based transformations to be applied to the dataset. + description: infoTypeTransformations - Specifies free-text based transformations to be applied to the dataset. properties: - !ruby/object:Api::Type::Array - name: 'transformations' + name: 'tf' required: true description: | - Transformation for each infoType. Cannot specify more than one for a given infoType. + transformations - Transformation for each infoType. Cannot specify more than one for a given infoType. item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::Array @@ -961,7 +961,7 @@ objects: description: | Name of the information type. - !ruby/object:Api::Type::NestedObject - name: 'primitiveTransformation' + name: 'primitiveTf' required: true description: | Primitive transformation to apply to the infoType. @@ -1292,4 +1292,852 @@ objects: name: 'radix' description: | The native way to select the alphabet. Must be in the range \[2, 95\]. + - !ruby/object:Api::Resource + name: 'Job' + create_url: "{{parent}}/dlpJobs" + self_link: "{{parent}}/dlpJobs/{{name}}" + base_url: "{{parent}}/dlpJobs" + collection_url_key: 'jobs' + update_verb: :PATCH + update_mask: true + description: | + DlpJob - Allows creation of templates to de-identify content. 
+ references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + 'https://cloud.google.com/dlp/docs/concepts-templates' + api: 'https://cloud.google.com/dlp/docs/reference/rest/v2/projects.dlpJobs' + parameters: + - !ruby/object:Api::Type::String + name: 'parent' + description: | + The parent of the template in any of the following formats: + + * `projects/{{project}}` + * `projects/{{project}}/locations/{{location}}` + * `organizations/{{organization_id}}` + * `organizations/{{organization_id}}/locations/{{location}}` + required: true + input: true + url_param_only: true + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the template. Set by the server. + output: true + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + An enum to represent the various types of DLP jobs. + values: + - :DLP_JOB_TYPE_UNSPECIFIED + - :INSPECT_JOB + - :RISK_ANALYSIS_JOB + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Possible states of a job. New items may be added. + values: + - :JOB_STATE_UNSPECIFIED + - :PENDING + - :RUNNING + - :DONE + - :CANCELED + - :FAILED + - :ACTIVE + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Time when the job was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Time when the job started. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Time when the job finished. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+ Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - !ruby/object:Api::Type::String + name: 'jobTriggerName' + description: If created by a job trigger, the resource name of the trigger that instantiated the job. + output: true + - !ruby/object:Api::Type::Array + name: 'errors' + required: true + description: | + Details information about an error encountered during job execution or the results of an unsuccessful activation of the JobTrigger. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'details' + required: true + description: Specifies free-text based transformations to be applied to the dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'status' + description: | + The Status type defines a logical error model that is suitable for different programming environments, + including REST APIs and RPC APIs. It is used by gRPC. Each Status message contains three pieces of data: + error code, error message, and error details. + properties: + - !ruby/object:Api::Type::Integer + name: 'code' + required: true + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::Array + name: 'details' + required: true + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + An object containing fields of an arbitrary type. An additional field "type" contains a URI identifying the type. + Example: { "id": 1234, "type": "types.example.com/standard/id" }. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'type' + required: true + description: | + type of field + - !ruby/object:Api::Type::String + name: 'field1' + required: true + description: | + name of field + - !ruby/object:Api::Type::Array + name: 'actDet' + required: true + description: | + actionDetails - Details information about an error encountered during job execution or the results of an unsuccessful activation of the JobTrigger. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dtDet' + required: true + description: DeIdentifyDetails - Specifies free-text based transformations to be applied to the dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'deStats' + required: true + description: deidentifyStats - Specifies free-text based transformations to be applied to the dataset. + properties: + - !ruby/object:Api::Type::String + name: 'transformedBytes' + required: true + description: | + Total size in bytes that were transformed in some way. + - !ruby/object:Api::Type::String + name: 'transformationCount' + required: true + description: | + Number of successfully applied transformations. + - !ruby/object:Api::Type::String + name: 'transformationErrorCount' + required: true + description: | + Number of errors encountered while trying to apply transformations. + - !ruby/object:Api::Type::NestedObject + name: 'req_Opt' + description: | + requestedOptions + The Status type defines a logical error model that is suitable for different programming environments, + including REST APIs and RPC APIs. It is used by gRPC. Each Status message contains three pieces of data: + error code, error message, and error details. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'snapshotDT' + description: | + snapshotDeidentifyTemplate + Snapshot of the state of the DeidentifyTemplate from the Deidentify action at the time this job was run. + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'displayName' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'description' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'createTime' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'updateTime' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'config' + description: | + deidentifyConfig + The Status type defines a logical error model that is suitable for different programming environments, + including REST APIs and RPC APIs. It is used by gRPC. Each Status message contains three pieces of data: + error code, error message, and error details. 
+ properties: + - !ruby/object:Api::Type::String + name: 'tfErrorHandling' + required: true + description: | + transformationErrorHandling + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::NestedObject + name: 'snapshotStructuredDT' + description: | + snapshotStructuredDeidentifyTemplate + Snapshot of the state of the structured DeidentifyTemplate from the Deidentify action at the time this job was run. + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'displayName' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'description' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'createTime' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'updateTime' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'deidentifyConfig' + description: | + The Status type defines a logical error model that is suitable for different programming environments, + including REST APIs and RPC APIs. 
It is used by gRPC. Each Status message contains three pieces of data: + error code, error message, and error details. + properties: + - !ruby/object:Api::Type::String + name: 'tfErrorHandling' + required: true + description: | + transformationErrorHandling + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::NestedObject + name: 'snapshotImgRT' + description: | + snapshotImageRedactTemplate + Snapshot of the state of the image transformation DeidentifyTemplate from the Deidentify action at the time this job was run. + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + Output only. The template name. + The template will have one of the following formats: + projects/PROJECT_ID/deidentifyTemplates/TEMPLATE_ID OR + organizations/ORGANIZATION_ID/deidentifyTemplates/TEMPLATE_ID + - !ruby/object:Api::Type::String + name: 'displayName' + required: true + description: | + Display name (max 256 chars). + - !ruby/object:Api::Type::String + name: 'description' + required: true + description: | + Short description (max 256 chars). + - !ruby/object:Api::Type::String + name: 'createTime' + required: true + description: | + Output only. The creation timestamp of an inspectTemplate. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::String + name: 'updateTime' + required: true + description: | + Output only. The creation timestamp of an inspectTemplate. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::NestedObject + name: 'dtCon' + description: | + deidentifyConfig + The configuration that controls how the data will change. 
+ properties: + - !ruby/object:Api::Type::String + name: 'tfErrorHandling' + required: false + description: | + transformationErrorHandling + How to handle transformation errors during de-identification. A transformation error + occurs when the requested transformation is incompatible with the data. For example, + trying to de-identify an IP address using a DateShift transformation would result in a + transformation error, since date info cannot be extracted from an IP address. + Information about any incompatible transformations, and how they were handled, + is returned in the response as part of the TransformationOverviews. + - !ruby/object:Api::Type::NestedObject + name: 'infoTypeTf' + required: true + description: | + infoTypeTransformations + Treat the dataset as free-form text and apply the same free text transformation everywhere. + properties: + - !ruby/object:Api::Type::Array + name: 'tf' + required: false + description: | + transformations + A type of transformation that will scan unstructured text and apply various PrimitiveTransformations + to each finding, where the transformation is applied to only values that were identified as a specific + infoType. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'infoTypes' + required: true + description: | + InfoTypes to apply the transformation to. An empty list will cause this transformation + to apply to all findings that correspond to infoTypes that were requested in InspectConfig. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + Name of the information type. Either a name of your choosing when creating a CustomInfoType, + or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying + a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the + pattern [A-Za-z0-9$_-]{1,64}. 
+ - !ruby/object:Api::Type::String + name: 'version' + required: true + description: | + Optional version name for this InfoType. + - !ruby/object:Api::Type::NestedObject + name: 'sensitivityScore' + required: true + description: | + Score is calculated from of all elements in the data profile. A higher level means the data is more sensitive. + properties: + - !ruby/object:Api::Type::Enum + name: 'score' + description: | + Various sensitivity score levels for resources. + Enums + **SENSITIVITY_SCORE_UNSPECIFIED** - Unused. + **SENSITIVITY_LOW** - No sensitive information detected. The resource isn't publicly accessible. + **SENSITIVITY_MODERATE** - Medium risk. Contains personally identifiable information (PII), potentially sensitive data, or fields with free-text data that are at a higher risk of having intermittent sensitive data. Consider limiting access. + **SENSITIVITY_HIGH** - High risk. Sensitive personally identifiable information (SPII) can be present. Exfiltration of data can lead to user data loss. Re-identification of users might be possible. Consider limiting usage and or removing SPII. + values: + - :SENSITIVITY_SCORE_UNSPECIFIED + - :SENSITIVITY_LOW + - :SENSITIVITY_MODERATE + - :SENSITIVITY_HIGH + - !ruby/object:Api::Type::NestedObject + name: 'recTf' + required: true + description: | + recordTransformations - Treat the dataset as free-form text and apply the same free text transformation everywhere. + properties: + - !ruby/object:Api::Type::Array + name: 'fieldTf' + required: false + description: | + The transformation to apply to the field. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'fields' + required: true + description: | + InfoTypes to apply the transformation to. An empty list will cause this transformation + to apply to all findings that correspond to infoTypes that were requested in InspectConfig. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + Name describing the field. + - !ruby/object:Api::Type::NestedObject + name: 'con' + required: true + description: | + condition + A condition for determining whether a transformation should be applied to a field. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'exps' + required: true + description: | + expressions + Only apply the transformation if the condition evaluates to true for the given RecordCondition. + The conditions are allowed to reference fields that are not used in the actual transformation.Example Use Cases: + Apply a different bucket transformation to an age column if the zip code column for the same record is within a specific range. + Redact a field if the date of birth field is greater than 85. + properties: + - !ruby/object:Api::Type::Enum + name: 'logicalOperator' + description: | + The operator to apply to the result of conditions. Default and currently only supported value is AND. + Enums + **LOGICAL_OPERATOR_UNSPECIFIED** - Unused + **AND** - Conditional AND + values: + - :LOGICAL_OPERATOR_UNSPECIFIED + - :AND + - !ruby/object:Api::Type::NestedObject + name: 'con' + required: true + description: | + Conditions to apply to the expression. + properties: + - !ruby/object:Api::Type::Array + name: 'cond' + required: true + description: | + A collection of conditions. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'field' + description: Designated field in the BigQuery table. + required: true + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + Name describing the field. + - !ruby/object:Api::Type::Enum + name: 'operator' + description: | + Operators available for comparing the value of fields. + Enums + **RELATIONAL_OPERATOR_UNSPECIFIED** Unused + **EQUAL_TO** Equal. 
Attempts to match even with incompatible types. + **NOT_EQUAL_TO** Not equal to. Attempts to match even with incompatible types. + **GREATER_THAN** Greater than. + **LESS_THAN** Less than. + **GREATER_THAN_OR_EQUALS** Greater than or equals. + **LESS_THAN_OR_EQUALS** Less than or equals. + **EXISTS** Exists + values: + - :RELATIONAL_OPERATOR_UNSPECIFIED + - :EQUAL_TO + - :NOT_EQUAL_TO + - :GREATER_THAN + - :LESS_THAN + - :GREATER_THAN_OR_EQUALS + - :LESS_THAN_OR_EQUALS + - :EXISTS + - !ruby/object:Api::Type::NestedObject + name: 'newVal' + required: true + description: | + newValue + Replace each input value with a given value. + properties: + - !ruby/object:Api::Type::Integer + name: 'integerValue' + description: | + An integer value. + - !ruby/object:Api::Type::Double + name: 'floatValue' + description: | + A float value. + - !ruby/object:Api::Type::String + name: 'stringValue' + description: | + A string value. + - !ruby/object:Api::Type::Boolean + name: 'booleanValue' + description: | + A boolean value. + - !ruby/object:Api::Type::String + name: 'timestampValue' + description: | + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::NestedObject + name: 'timeVal' + description: | + Represents a time of day. + properties: + - !ruby/object:Api::Type::Integer + name: 'hours' + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + - !ruby/object:Api::Type::Integer + name: 'minutes' + description: | + Minutes of hour of day. Must be from 0 to 59. + - !ruby/object:Api::Type::Integer + name: 'seconds' + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. 
+ - !ruby/object:Api::Type::NestedObject + name: 'dateVal' + description: | + dateValue + Represents a whole or partial calendar date. + properties: + - !ruby/object:Api::Type::Integer + name: 'year' + description: | + Year of date. Must be from 1 to 9999, or 0 if specifying a date without a year. + - !ruby/object:Api::Type::Integer + name: 'month' + description: | + Month of year. Must be from 1 to 12, or 0 if specifying a year without a month and day. + - !ruby/object:Api::Type::Integer + name: 'day' + description: | + Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a + year by itself or a year and month where the day is not significant. + - !ruby/object:Api::Type::Enum + name: 'dayOfWeekVal' + description: | + datOfWeekValue + Represents a day of the week. + values: + - :MONDAY + - :TUESDAY + - :WEDNESDAY + - :THURSDAY + - :FRIDAY + - :SATURDAY + - :SUNDAY + - !ruby/object:Api::Type::Array + name: 'recSup' + required: false + description: | + recordSuppressions - The transformation to apply to the field. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'con' + required: true + description: | + A condition for determining whether a transformation should be applied to a field. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'exp' + required: true + description: | + expressions + Only apply the transformation if the condition evaluates to true for the given RecordCondition. + The conditions are allowed to reference fields that are not used in the actual transformation.Example Use Cases: + Apply a different bucket transformation to an age column if the zip code column for the same record is within a specific range. + Redact a field if the date of birth field is greater than 85. + properties: + - !ruby/object:Api::Type::Enum + name: 'logicalOperator' + description: | + The operator to apply to the result of conditions. 
Default and currently only supported value is AND. + Enums + **LOGICAL_OPERATOR_UNSPECIFIED** - Unused + **AND** - Conditional AND + values: + - :LOGICAL_OPERATOR_UNSPECIFIED + - :AND + - !ruby/object:Api::Type::NestedObject + name: 'con' + required: true + description: | + Conditions to apply to the expression. + properties: + - !ruby/object:Api::Type::Array + name: 'con' + required: true + description: | + A collection of conditions. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'field' + description: Designated field in the BigQuery table. + required: true + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + Name describing the field. + - !ruby/object:Api::Type::Enum + name: 'operator' + description: | + Operators available for comparing the value of fields. + Enums + **RELATIONAL_OPERATOR_UNSPECIFIED** Unused + **EQUAL_TO** Equal. Attempts to match even with incompatible types. + **NOT_EQUAL_TO** Not equal to. Attempts to match even with incompatible types. + **GREATER_THAN** Greater than. + **LESS_THAN** Less than. + **GREATER_THAN_OR_EQUALS** Greater than or equals. + **LESS_THAN_OR_EQUALS** Less than or equals. + **EXISTS** Exists + values: + - :RELATIONAL_OPERATOR_UNSPECIFIED + - :EQUAL_TO + - :NOT_EQUAL_TO + - :GREATER_THAN + - :LESS_THAN + - :GREATER_THAN_OR_EQUALS + - :LESS_THAN_OR_EQUALS + - :EXISTS + - !ruby/object:Api::Type::NestedObject + name: 'newVal' + required: true + description: | + newValue + Replace each input value with a given value. + properties: + - !ruby/object:Api::Type::Integer + name: 'integerVal' + description: | + An integer value. + - !ruby/object:Api::Type::Double + name: 'floatVal' + description: | + A float value. + - !ruby/object:Api::Type::String + name: 'stringVal' + description: | + A string value. + - !ruby/object:Api::Type::Boolean + name: 'booleanVal' + description: | + A boolean value. 
+ - !ruby/object:Api::Type::String + name: 'timestampVal' + description: | + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::NestedObject + name: 'timeVal' + description: | + timeValue + Represents a time of day. + properties: + - !ruby/object:Api::Type::Integer + name: 'hours' + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + - !ruby/object:Api::Type::Integer + name: 'minutes' + description: | + Minutes of hour of day. Must be from 0 to 59. + - !ruby/object:Api::Type::Integer + name: 'seconds' + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + - !ruby/object:Api::Type::NestedObject + name: 'dateVal' + description: | + dateValue + Represents a whole or partial calendar date. + properties: + - !ruby/object:Api::Type::Integer + name: 'year' + description: | + Year of date. Must be from 1 to 9999, or 0 if specifying a date without a year. + - !ruby/object:Api::Type::Integer + name: 'month' + description: | + Month of year. Must be from 1 to 12, or 0 if specifying a year without a month and day. + - !ruby/object:Api::Type::Integer + name: 'day' + description: | + Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a + year by itself or a year and month where the day is not significant. + - !ruby/object:Api::Type::Enum + name: 'dayOfWeekVal' + description: | + dayOfWeekValue + Represents a day of the week. 
+ values: + - :MONDAY + - :TUESDAY + - :WEDNESDAY + - :THURSDAY + - :FRIDAY + - :SATURDAY + - :SUNDAY + - !ruby/object:Api::Type::NestedObject + name: 'ImageTf' + required: true + description: | + ImageTransformations + Treat the dataset as free-form text and apply the same free text transformation everywhere. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'redactionColor' + required: true + description: | + Treat the dataset as free-form text and apply the same free text transformation everywhere. + properties: + - !ruby/object:Api::Type::String + name: 'red' + required: true + description: red color + - !ruby/object:Api::Type::String + name: 'green' + required: true + description: green color + - !ruby/object:Api::Type::String + name: 'blue' + required: true + description: blue color + - !ruby/object:Api::Type::Array + name: 'selectedInfoTypes' + description: | + Restricts what infoTypes to look for. The values must correspond to InfoType values returned by infoTypes.list + or listed at https://cloud.google.com/dlp/docs/infotypes-reference. + When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. + By default this may be all types, but may change over time as detectors are updated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed + at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. + - !ruby/object:Api::Type::Array + name: 'allInfoTypes' + description: | + Restricts what infoTypes to look for. The values must correspond to InfoType values returned by infoTypes.list + or listed at https://cloud.google.com/dlp/docs/infotypes-reference. 
+ + When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. + By default this may be all types, but may change over time as detectors are updated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + description: | + Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed + at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. + - !ruby/object:Api::Type::NestedObject + name: 'allText' + required: true + description: | + Apply to all text. + properties: + - !ruby/object:Api::Type::String + name: 'red' + required: true + description: red color + - !ruby/object:Api::Type::String + name: 'green' + required: true + description: green color + - !ruby/object:Api::Type::String + name: 'blue' + required: true + description: blue color + + + + - !ruby/object:Api::Type::NestedObject + name: 'deidentifyStats' + required: true + description: Specifies free-text based transformations to be applied to the dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'requestedOptions' + description: | + The Status type defines a logical error model that is suitable for different programming environments, + including REST APIs and RPC APIs. It is used by gRPC. Each Status message contains three pieces of data: + error code, error message, and error details. + properties: + - !ruby/object:Api::Type::Integer + name: 'code' + required: true + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + required: true + description: | + A developer-facing error message, which should be in English. + Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. 
+ - !ruby/object:Api::Type::Array + name: 'details' + required: true + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + An object containing fields of an arbitrary type. An additional field "type" contains a URI identifying the type. + Example: { "id": 1234, "type": "types.example.com/standard/id" }. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'type' + required: true + description: | + type of field + - !ruby/object:Api::Type::String + name: 'field1' + required: true + description: | + name of field + + + - !ruby/object:Api::Type::String + name: 'riskDetails' + required: true + description: | + Result of a risk analysis operation request. + - !ruby/object:Api::Type::String + name: 'inspectDetails' + required: true + description: | + Results from inspecting a data source. diff --git a/mmv1/products/dlp/inspec.yaml b/mmv1/products/dlp/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/dlp/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/iam/api.yaml b/mmv1/products/iam/api.yaml index 9a7b00df2..4587887b0 100644 --- a/mmv1/products/iam/api.yaml +++ b/mmv1/products/iam/api.yaml @@ -195,4 +195,92 @@ objects: - !ruby/object:Api::Type::Boolean name: 'deleted' description: The current deleted state of the role - output: true \ No newline at end of file + output: true + + + - !ruby/object:Api::Resource + name: ProjectServiceAccountKey + base_url: '{{name}}/keys' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/iam/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a service account key. A service account has two sets of key-pairs: user-managed, and system-managed. User-managed key-pairs can be created and deleted by users. Users are responsible for rotating these keys periodically to ensure security of their service accounts. Users retain the private key of these key-pairs, and Google retains ONLY the public key. System-managed keys are automatically rotated by Google, and are used for signing for a maximum of two weeks. The rotation process is probabilistic, and usage of the new key will gradually ramp up and down over the key's lifetime. If you cache the public key set for a service account, we recommend that you update the cache every 15 minutes. User-managed keys can be added and removed at any time, so it is important to update the cache frequently. 
For Google-managed keys, Google will publish a key at least 6 hours before it is first used for signing and will keep publishing it for at least 6 hours after it was last used for signing. Public keys for all service accounts are also published at the OAuth2 Service Account API. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the service account key in the following format `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. + - !ruby/object:Api::Type::Enum + name: 'privateKeyType' + description: | + The output format for the private key. Only provided in `CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or `ListServiceAccountKey` responses. Google never exposes system-managed private keys, and never retains user-managed private keys. + values: + - :TYPE_UNSPECIFIED + - :TYPE_PKCS12_FILE + - :TYPE_GOOGLE_CREDENTIALS_FILE + - !ruby/object:Api::Type::Enum + name: 'keyAlgorithm' + description: | + Specifies the algorithm (and possibly key size) for the key. + values: + - :KEY_ALG_UNSPECIFIED + - :KEY_ALG_RSA_1024 + - :KEY_ALG_RSA_2048 + - !ruby/object:Api::Type::String + name: 'privateKeyData' + description: | + The private key data. Only provided in `CreateServiceAccountKey` responses. Make sure to keep the private key data secure because it allows for the assertion of the service account identity. When base64 decoded, the private key data can be used to authenticate with Google API client libraries and with gcloud auth activate-service-account. + - !ruby/object:Api::Type::String + name: 'publicKeyData' + description: | + The public key data. Only provided in `GetServiceAccountKey` responses. + - !ruby/object:Api::Type::String + name: 'validAfterTime' + description: | + The key can be used after this timestamp. + - !ruby/object:Api::Type::String + name: 'validBeforeTime' + description: | + The key can be used before this timestamp. 
For system-managed key pairs, this timestamp is the end time for the private key signing operation. The public key could still be used for verification for a few hours after this time. + - !ruby/object:Api::Type::Enum + name: 'keyOrigin' + description: | + The key origin. + values: + - :ORIGIN_UNSPECIFIED + - :USER_PROVIDED + - :GOOGLE_PROVIDED + - !ruby/object:Api::Type::Enum + name: 'keyType' + description: | + The key type. + values: + - :KEY_TYPE_UNSPECIFIED + - :USER_MANAGED + - :SYSTEM_MANAGED + - !ruby/object:Api::Type::Boolean + name: 'disabled' + description: | + The key status. + diff --git a/mmv1/products/metastore/api.yaml b/mmv1/products/metastore/api.yaml index a860ffb1d..bda9e5084 100644 --- a/mmv1/products/metastore/api.yaml +++ b/mmv1/products/metastore/api.yaml @@ -15,7 +15,7 @@ name: DataprocMetastore versions: - !ruby/object:Api::Product::Version - name: beta + name: ga base_url: https://metastore.googleapis.com/v1beta/ scopes: - https://www.googleapis.com/auth/cloud-identity @@ -188,3 +188,2001 @@ objects: required: true description: | A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly. 
+ + + + - !ruby/object:Api::Resource + name: 'ProjectLocationFederation' + base_url: '{{+parent}}/federations' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/metastore/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a federation of multiple backend metastores. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The relative resource name of the federation, of the form: projects/{project_number}/locations/{location_id}/federations/{federation_id}`. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the metastore federation was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time when the metastore federation was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User-defined labels for the metastore federation. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'version' + description: | + Immutable. The Apache Hive metastore version of the federation. All backend metastore versions must be compatible with the federation version. + - !ruby/object:Api::Type::NestedObject + name: 'backendMetastores' + description: | + A map from BackendMetastore rank to BackendMetastores from which the federation service serves metadata at query time. 
The map key represents the order in which BackendMetastores should be evaluated to resolve database names at query time and should be greater than or equal to zero. A BackendMetastore with a lower number will be evaluated before a BackendMetastore with a higher number. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Represents a backend metastore for the federation. + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The federation endpoint. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the federation. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :UPDATING + - :DELETING + - :ERROR + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of the metastore federation, if available. + - !ruby/object:Api::Type::String + name: 'uid' + description: | + Output only. The globally unique resource identifier of the metastore federation. + + + + + + + - !ruby/object:Api::Resource + name: ProjectLocationService + base_url: '{{+parent}}/services' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/metastore/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A managed metastore service that serves metadata queries. 
+ properties: + + - !ruby/object:Api::Type::NestedObject + name: 'hiveMetastoreConfig' + description: | + Specifies configuration information specific to running Hive metastore software as the metastore service. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + Immutable. The Hive metastore schema version. + - !ruby/object:Api::Type::NestedObject + name: 'configOverrides' + description: | + A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden). These overrides are also applied to auxiliary versions and can be further customized in the auxiliary version's AuxiliaryVersionConfig. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Configuration information for a Kerberos principal. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'keytab' + description: | + A securely stored value. + properties: + - !ruby/object:Api::Type::String + name: 'cloudSecret' + description: | + The relative resource name of a Secret Manager secret version, in the following form:projects/{project_number}/secrets/{secret_id}/versions/{version_id}. + - !ruby/object:Api::Type::String + name: 'principal' + description: | + A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form primary/instance@REALM, but there is no exact format. + - !ruby/object:Api::Type::String + name: 'krb5ConfigGcsUri' + description: | + A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly. 
+ - !ruby/object:Api::Type::Enum + name: 'endpointProtocol' + description: | + The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. + values: + - :ENDPOINT_PROTOCOL_UNSPECIFIED + - :THRIFT + - :GRPC + - !ruby/object:Api::Type::NestedObject + name: 'auxiliaryVersions' + description: | + A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Configuration information for the auxiliary service versions. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The relative resource name of the metastore service, in the following format:projects/{project_number}/locations/{location_id}/services/{service_id}. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the metastore service was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time when the metastore service was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User-defined labels for the metastore service. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'network' + description: | + Immutable. The relative resource name of the VPC network on which the instance can be accessed. 
It is specified in the following form:projects/{project_number}/global/networks/{network_id}. + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + The TCP port at which the metastore service is reached. Default: 9083. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the metastore service. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :SUSPENDING + - :SUSPENDED + - :UPDATING + - :DELETING + - :ERROR + - :MIGRATING + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of the metastore service, if available. + - !ruby/object:Api::Type::String + name: 'artifactGcsUri' + description: | + Output only. A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored. + - !ruby/object:Api::Type::Enum + name: 'tier' + description: | + The tier of the service. + values: + - :TIER_UNSPECIFIED + - :DEVELOPER + - :ENTERPRISE + - !ruby/object:Api::Type::NestedObject + name: 'metadataIntegration' + description: | + Specifies how metastore metadata should be integrated with external services. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dataCatalogConfig' + description: | + Specifies how metastore metadata should be integrated with the Data Catalog service. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + - !ruby/object:Api::Type::NestedObject + name: 'dataplexConfig' + description: | + Specifies how metastore metadata should be integrated with the Dataplex service. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'lakeResources' + description: | + A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Represents a Lake resource + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. + properties: + - !ruby/object:Api::Type::Integer + name: 'hourOfDay' + description: | + The hour of day (0-23) when the window starts. + - !ruby/object:Api::Type::Enum + name: 'dayOfWeek' + description: | + The day of week, when the window starts. + values: + - :DAY_OF_WEEK_UNSPECIFIED + - :MONDAY + - :TUESDAY + - :WEDNESDAY + - :THURSDAY + - :FRIDAY + - :SATURDAY + - :SUNDAY + - !ruby/object:Api::Type::String + name: 'uid' + description: | + Output only. The globally unique resource identifier of the metastore service. + - !ruby/object:Api::Type::NestedObject + name: 'metadataManagementActivity' + description: | + The metadata management activities of the metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'metadataExports' + description: | + Output only. The latest metadata exports of the metastore service. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'destinationGcsUri' + description: | + Output only. A Cloud Storage URI of a folder that metadata are exported to, in the form of gs:////, where is automatically generated. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the export started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the export ended. 
+ - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the export. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::Enum + name: 'databaseDumpType' + description: | + Output only. The type of the database dump. + values: + - :TYPE_UNSPECIFIED + - :MYSQL + - :AVRO + - !ruby/object:Api::Type::Array + name: 'restores' + description: | + Output only. The latest restores of the metastore service. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the restore started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the restore ended. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the restore. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'backup' + description: | + Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Output only. The type of restore. + values: + - :RESTORE_TYPE_UNSPECIFIED + - :FULL + - :METADATA_ONLY + - !ruby/object:Api::Type::String + name: 'details' + description: | + Output only. The restore details containing the revision of the service to be restored to, in format of JSON. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. + - !ruby/object:Api::Type::Enum + name: 'releaseChannel' + description: | + Immutable. The release channel of the service. If unspecified, defaults to STABLE. 
+ values: + - :RELEASE_CHANNEL_UNSPECIFIED + - :CANARY + - :STABLE + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the service. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + The fully qualified customer provided Cloud KMS key name to use for customer data encryption, in the following format:projects/{project_number}/locations/{location_id}/keyRings/{key_ring_id}/cryptoKeys/{crypto_key_id}. + - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Network configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'consumers' + description: | + Immutable. The consumer-side network configuration for the Dataproc Metastore instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Immutable. The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form:projects/{project_number}/regions/{region_id}/subnetworks/{subnetwork_id} + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::String + name: 'endpointLocation' + description: | + Output only. The location of the endpoint URI. Format: projects/{project}/locations/{location}. + - !ruby/object:Api::Type::Boolean + name: 'customRoutesEnabled' + description: | + Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network. 
+ - !ruby/object:Api::Type::Enum + name: 'databaseType' + description: | + Immutable. The database type that the Metastore service stores its data. + values: + - :DATABASE_TYPE_UNSPECIFIED + - :MYSQL + - :SPANNER + - !ruby/object:Api::Type::NestedObject + name: 'telemetryConfig' + description: | + Telemetry Configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'logFormat' + description: | + The output format of the Dataproc Metastore service's logs. + values: + - :LOG_FORMAT_UNSPECIFIED + - :LEGACY + - :JSON + - !ruby/object:Api::Type::NestedObject + name: 'scalingConfig' + description: | + Represents the scaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'instanceSize' + description: | + An enum of readable instance sizes, with each instance size mapping to a float value (e.g. InstanceSize.EXTRA_SMALL = scaling_factor(0.1)) + values: + - :INSTANCE_SIZE_UNSPECIFIED + - :EXTRA_SMALL + - :SMALL + - :MEDIUM + - :LARGE + - :EXTRA_LARGE + - !ruby/object:Api::Type::Integer + name: 'scalingFactor' + description: | + Scaling factor, increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0. + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Represents the autoscaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'autoscalingFactor' + description: | + Output only. The scaling factor of a service with autoscaling enabled. + - !ruby/object:Api::Type::Boolean + name: 'autoscalingEnabled' + description: | + Optional. Whether or not autoscaling is enabled for this service. + - !ruby/object:Api::Type::NestedObject + name: 'limitConfig' + description: | + Represents the autoscaling limit configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxScalingFactor' + description: | + Optional. 
The highest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::Integer + name: 'minScalingFactor' + description: | + Optional. The lowest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::NestedObject + name: 'scheduledBackup' + description: | + This specifies the configuration of scheduled backup. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the scheduled backup is enabled. The default value is false. + - !ruby/object:Api::Type::String + name: 'cronSchedule' + description: | + Optional. The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. Specifies the time zone to be used when interpreting cron_schedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC. + - !ruby/object:Api::Type::String + name: 'nextScheduledTime' + description: | + Output only. The time when the next backups execution is scheduled to start. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. A Cloud Storage URI of a folder, in the format gs:///. A sub-folder containing backup files will be stored below it. + - !ruby/object:Api::Type::NestedObject + name: 'latestBackup' + description: | + The details of the latest scheduled backup. + properties: + - !ruby/object:Api::Type::String + name: 'backupId' + description: | + Output only. The ID of an in-progress scheduled backup. Empty if no backup is in progress. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the backup was started. 
+ - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the backup. + values: + - :STATE_UNSPECIFIED + - :IN_PROGRESS + - :SUCCEEDED + - :FAILED + - !ruby/object:Api::Type::String + name: 'duration' + description: | + Output only. The duration of the backup completion. + - !ruby/object:Api::Type::Boolean + name: 'deletionProtection' + description: | + Optional. Indicates if the dataproc metastore should be protected against accidental deletions. + + + + + - !ruby/object:Api::Resource + name: ProjectLocationService + base_url: '{{+parent}}/services' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/metastore/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A managed metastore service that serves metadata queries. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'hiveMetastoreConfig' + description: | + Specifies configuration information specific to running Hive metastore software as the metastore service. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + Immutable. The Hive metastore schema version. + - !ruby/object:Api::Type::NestedObject + name: 'configOverrides' + description: | + A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden). 
These overrides are also applied to auxiliary versions and can be further customized in the auxiliary version's AuxiliaryVersionConfig. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Configuration information for a Kerberos principal. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'keytab' + description: | + A securely stored value. + properties: + - !ruby/object:Api::Type::String + name: 'cloudSecret' + description: | + The relative resource name of a Secret Manager secret version, in the following form:projects/{project_number}/secrets/{secret_id}/versions/{version_id}. + - !ruby/object:Api::Type::String + name: 'principal' + description: | + A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form primary/instance@REALM, but there is no exact format. + - !ruby/object:Api::Type::String + name: 'krb5ConfigGcsUri' + description: | + A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly. + - !ruby/object:Api::Type::Enum + name: 'endpointProtocol' + description: | + The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. + values: + - :ENDPOINT_PROTOCOL_UNSPECIFIED + - :THRIFT + - :GRPC + - !ruby/object:Api::Type::NestedObject + name: 'auxiliaryVersions' + description: | + A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. 
This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Configuration information for the auxiliary service versions. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The relative resource name of the metastore service, in the following format:projects/{project_number}/locations/{location_id}/services/{service_id}. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the metastore service was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time when the metastore service was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User-defined labels for the metastore service. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'network' + description: | + Immutable. The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:projects/{project_number}/global/networks/{network_id}. + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + The TCP port at which the metastore service is reached. Default: 9083. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the metastore service. 
+ values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :SUSPENDING + - :SUSPENDED + - :UPDATING + - :DELETING + - :ERROR + - :MIGRATING + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of the metastore service, if available. + - !ruby/object:Api::Type::String + name: 'artifactGcsUri' + description: | + Output only. A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored. + - !ruby/object:Api::Type::Enum + name: 'tier' + description: | + The tier of the service. + values: + - :TIER_UNSPECIFIED + - :DEVELOPER + - :ENTERPRISE + - !ruby/object:Api::Type::NestedObject + name: 'metadataIntegration' + description: | + Specifies how metastore metadata should be integrated with external services. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dataCatalogConfig' + description: | + Specifies how metastore metadata should be integrated with the Data Catalog service. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + - !ruby/object:Api::Type::NestedObject + name: 'dataplexConfig' + description: | + Specifies how metastore metadata should be integrated with the Dataplex service. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'lakeResources' + description: | + A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Represents a Lake resource + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + Maintenance window. 
This specifies when Dataproc Metastore may perform system maintenance operation to the service. + properties: + - !ruby/object:Api::Type::Integer + name: 'hourOfDay' + description: | + The hour of day (0-23) when the window starts. + - !ruby/object:Api::Type::Enum + name: 'dayOfWeek' + description: | + The day of week, when the window starts. + values: + - :DAY_OF_WEEK_UNSPECIFIED + - :MONDAY + - :TUESDAY + - :WEDNESDAY + - :THURSDAY + - :FRIDAY + - :SATURDAY + - :SUNDAY + - !ruby/object:Api::Type::String + name: 'uid' + description: | + Output only. The globally unique resource identifier of the metastore service. + - !ruby/object:Api::Type::NestedObject + name: 'metadataManagementActivity' + description: | + The metadata management activities of the metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'metadataExports' + description: | + Output only. The latest metadata exports of the metastore service. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'destinationGcsUri' + description: | + Output only. A Cloud Storage URI of a folder that metadata are exported to, in the form of gs:////, where is automatically generated. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the export started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the export ended. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the export. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::Enum + name: 'databaseDumpType' + description: | + Output only. The type of the database dump. + values: + - :TYPE_UNSPECIFIED + - :MYSQL + - :AVRO + - !ruby/object:Api::Type::Array + name: 'restores' + description: | + Output only. The latest restores of the metastore service. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the restore started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the restore ended. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the restore. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'backup' + description: | + Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Output only. The type of restore. + values: + - :RESTORE_TYPE_UNSPECIFIED + - :FULL + - :METADATA_ONLY + - !ruby/object:Api::Type::String + name: 'details' + description: | + Output only. The restore details containing the revision of the service to be restored to, in format of JSON. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. + - !ruby/object:Api::Type::Enum + name: 'releaseChannel' + description: | + Immutable. The release channel of the service. If unspecified, defaults to STABLE. + values: + - :RELEASE_CHANNEL_UNSPECIFIED + - :CANARY + - :STABLE + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the service. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + The fully qualified customer provided Cloud KMS key name to use for customer data encryption, in the following format:projects/{project_number}/locations/{location_id}/keyRings/{key_ring_id}/cryptoKeys/{crypto_key_id}. 
+ - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Network configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'consumers' + description: | + Immutable. The consumer-side network configuration for the Dataproc Metastore instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Immutable. The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form:projects/{project_number}/regions/{region_id}/subnetworks/{subnetwork_id} + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::String + name: 'endpointLocation' + description: | + Output only. The location of the endpoint URI. Format: projects/{project}/locations/{location}. + - !ruby/object:Api::Type::Boolean + name: 'customRoutesEnabled' + description: | + Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network. + - !ruby/object:Api::Type::Enum + name: 'databaseType' + description: | + Immutable. The database type that the Metastore service stores its data. + values: + - :DATABASE_TYPE_UNSPECIFIED + - :MYSQL + - :SPANNER + - !ruby/object:Api::Type::NestedObject + name: 'telemetryConfig' + description: | + Telemetry Configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'logFormat' + description: | + The output format of the Dataproc Metastore service's logs. 
+ values: + - :LOG_FORMAT_UNSPECIFIED + - :LEGACY + - :JSON + - !ruby/object:Api::Type::NestedObject + name: 'scalingConfig' + description: | + Represents the scaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'instanceSize' + description: | + An enum of readable instance sizes, with each instance size mapping to a float value (e.g. InstanceSize.EXTRA_SMALL = scaling_factor(0.1)) + values: + - :INSTANCE_SIZE_UNSPECIFIED + - :EXTRA_SMALL + - :SMALL + - :MEDIUM + - :LARGE + - :EXTRA_LARGE + - !ruby/object:Api::Type::Integer + name: 'scalingFactor' + description: | + Scaling factor, increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0. + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Represents the autoscaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'autoscalingFactor' + description: | + Output only. The scaling factor of a service with autoscaling enabled. + - !ruby/object:Api::Type::Boolean + name: 'autoscalingEnabled' + description: | + Optional. Whether or not autoscaling is enabled for this service. + - !ruby/object:Api::Type::NestedObject + name: 'limitConfig' + description: | + Represents the autoscaling limit configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxScalingFactor' + description: | + Optional. The highest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::Integer + name: 'minScalingFactor' + description: | + Optional. The lowest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::NestedObject + name: 'scheduledBackup' + description: | + This specifies the configuration of scheduled backup. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the scheduled backup is enabled. 
The default value is false. + - !ruby/object:Api::Type::String + name: 'cronSchedule' + description: | + Optional. The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. Specifies the time zone to be used when interpreting cron_schedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC. + - !ruby/object:Api::Type::String + name: 'nextScheduledTime' + description: | + Output only. The time when the next backups execution is scheduled to start. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. A Cloud Storage URI of a folder, in the format gs:///. A sub-folder containing backup files will be stored below it. + - !ruby/object:Api::Type::NestedObject + name: 'latestBackup' + description: | + The details of the latest scheduled backup. + properties: + - !ruby/object:Api::Type::String + name: 'backupId' + description: | + Output only. The ID of an in-progress scheduled backup. Empty if no backup is in progress. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the backup was started. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the backup. + values: + - :STATE_UNSPECIFIED + - :IN_PROGRESS + - :SUCCEEDED + - :FAILED + - !ruby/object:Api::Type::String + name: 'duration' + description: | + Output only. The duration of the backup completion. + - !ruby/object:Api::Type::Boolean + name: 'deletionProtection' + description: | + Optional. Indicates if the dataproc metastore should be protected against accidental deletions. 
+ + + + + - !ruby/object:Api::Resource + name: ProjectLocationServiceBackup + base_url: '{{+parent}}/backups' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/metastore/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The details of a backup resource. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The relative resource name of the backup, in the following form:projects/{project_number}/locations/{location_id}/services/{service_id}/backups/{backup_id} + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the backup was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the backup finished creating. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the backup. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :DELETING + - :ACTIVE + - :FAILED + - :RESTORING + - !ruby/object:Api::Type::NestedObject + name: 'serviceRevision' + description: | + A managed metastore service that serves metadata queries. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'hiveMetastoreConfig' + description: | + Specifies configuration information specific to running Hive metastore software as the metastore service. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + Immutable. The Hive metastore schema version. 
+ - !ruby/object:Api::Type::NestedObject + name: 'configOverrides' + description: | + A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden). These overrides are also applied to auxiliary versions and can be further customized in the auxiliary version's AuxiliaryVersionConfig. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Configuration information for a Kerberos principal. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'keytab' + description: | + A securely stored value. + properties: + - !ruby/object:Api::Type::String + name: 'cloudSecret' + description: | + The relative resource name of a Secret Manager secret version, in the following form:projects/{project_number}/secrets/{secret_id}/versions/{version_id}. + - !ruby/object:Api::Type::String + name: 'principal' + description: | + A Kerberos principal that exists in the both the keytab the KDC to authenticate as. A typical principal is of the form primary/instance@REALM, but there is no exact format. + - !ruby/object:Api::Type::String + name: 'krb5ConfigGcsUri' + description: | + A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly. + - !ruby/object:Api::Type::Enum + name: 'endpointProtocol' + description: | + The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. + values: + - :ENDPOINT_PROTOCOL_UNSPECIFIED + - :THRIFT + - :GRPC + - !ruby/object:Api::Type::NestedObject + name: 'auxiliaryVersions' + description: | + A mapping of Hive metastore version to the auxiliary version configuration. 
When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Configuration information for the auxiliary service versions. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The relative resource name of the metastore service, in the following format:projects/{project_number}/locations/{location_id}/services/{service_id}. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the metastore service was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time when the metastore service was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User-defined labels for the metastore service. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'network' + description: | + Immutable. The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:projects/{project_number}/global/networks/{network_id}. + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + The TCP port at which the metastore service is reached. Default: 9083. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. 
The current state of the metastore service. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :SUSPENDING + - :SUSPENDED + - :UPDATING + - :DELETING + - :ERROR + - :MIGRATING + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of the metastore service, if available. + - !ruby/object:Api::Type::String + name: 'artifactGcsUri' + description: | + Output only. A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored. + - !ruby/object:Api::Type::Enum + name: 'tier' + description: | + The tier of the service. + values: + - :TIER_UNSPECIFIED + - :DEVELOPER + - :ENTERPRISE + - !ruby/object:Api::Type::NestedObject + name: 'metadataIntegration' + description: | + Specifies how metastore metadata should be integrated with external services. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dataCatalogConfig' + description: | + Specifies how metastore metadata should be integrated with the Data Catalog service. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + - !ruby/object:Api::Type::NestedObject + name: 'dataplexConfig' + description: | + Specifies how metastore metadata should be integrated with the Dataplex service. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'lakeResources' + description: | + A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Represents a Lake resource + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. + properties: + - !ruby/object:Api::Type::Integer + name: 'hourOfDay' + description: | + The hour of day (0-23) when the window starts. + - !ruby/object:Api::Type::Enum + name: 'dayOfWeek' + description: | + The day of week, when the window starts. + values: + - :DAY_OF_WEEK_UNSPECIFIED + - :MONDAY + - :TUESDAY + - :WEDNESDAY + - :THURSDAY + - :FRIDAY + - :SATURDAY + - :SUNDAY + - !ruby/object:Api::Type::String + name: 'uid' + description: | + Output only. The globally unique resource identifier of the metastore service. + - !ruby/object:Api::Type::NestedObject + name: 'metadataManagementActivity' + description: | + The metadata management activities of the metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'metadataExports' + description: | + Output only. The latest metadata exports of the metastore service. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'destinationGcsUri' + description: | + Output only. A Cloud Storage URI of a folder that metadata are exported to, in the form of gs:////, where is automatically generated. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the export started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the export ended. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the export. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::Enum + name: 'databaseDumpType' + description: | + Output only. 
The type of the database dump. + values: + - :TYPE_UNSPECIFIED + - :MYSQL + - :AVRO + - !ruby/object:Api::Type::Array + name: 'restores' + description: | + Output only. The latest restores of the metastore service. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the restore started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the restore ended. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the restore. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'backup' + description: | + Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Output only. The type of restore. + values: + - :RESTORE_TYPE_UNSPECIFIED + - :FULL + - :METADATA_ONLY + - !ruby/object:Api::Type::String + name: 'details' + description: | + Output only. The restore details containing the revision of the service to be restored to, in format of JSON. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. + - !ruby/object:Api::Type::Enum + name: 'releaseChannel' + description: | + Immutable. The release channel of the service. If unspecified, defaults to STABLE. + values: + - :RELEASE_CHANNEL_UNSPECIFIED + - :CANARY + - :STABLE + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the service. 
+ properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + The fully qualified customer provided Cloud KMS key name to use for customer data encryption, in the following format:projects/{project_number}/locations/{location_id}/keyRings/{key_ring_id}/cryptoKeys/{crypto_key_id}. + - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Network configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'consumers' + description: | + Immutable. The consumer-side network configuration for the Dataproc Metastore instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Immutable. The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form:projects/{project_number}/regions/{region_id}/subnetworks/{subnetwork_id} + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::String + name: 'endpointLocation' + description: | + Output only. The location of the endpoint URI. Format: projects/{project}/locations/{location}. + - !ruby/object:Api::Type::Boolean + name: 'customRoutesEnabled' + description: | + Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network. + - !ruby/object:Api::Type::Enum + name: 'databaseType' + description: | + Immutable. The database type that the Metastore service stores its data. 
+ values: + - :DATABASE_TYPE_UNSPECIFIED + - :MYSQL + - :SPANNER + - !ruby/object:Api::Type::NestedObject + name: 'telemetryConfig' + description: | + Telemetry Configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'logFormat' + description: | + The output format of the Dataproc Metastore service's logs. + values: + - :LOG_FORMAT_UNSPECIFIED + - :LEGACY + - :JSON + - !ruby/object:Api::Type::NestedObject + name: 'scalingConfig' + description: | + Represents the scaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'instanceSize' + description: | + An enum of readable instance sizes, with each instance size mapping to a float value (e.g. InstanceSize.EXTRA_SMALL = scaling_factor(0.1)) + values: + - :INSTANCE_SIZE_UNSPECIFIED + - :EXTRA_SMALL + - :SMALL + - :MEDIUM + - :LARGE + - :EXTRA_LARGE + - !ruby/object:Api::Type::Integer + name: 'scalingFactor' + description: | + Scaling factor, increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0. + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Represents the autoscaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'autoscalingFactor' + description: | + Output only. The scaling factor of a service with autoscaling enabled. + - !ruby/object:Api::Type::Boolean + name: 'autoscalingEnabled' + description: | + Optional. Whether or not autoscaling is enabled for this service. + - !ruby/object:Api::Type::NestedObject + name: 'limitConfig' + description: | + Represents the autoscaling limit configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxScalingFactor' + description: | + Optional. The highest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::Integer + name: 'minScalingFactor' + description: | + Optional. 
The lowest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::NestedObject + name: 'scheduledBackup' + description: | + This specifies the configuration of scheduled backup. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the scheduled backup is enabled. The default value is false. + - !ruby/object:Api::Type::String + name: 'cronSchedule' + description: | + Optional. The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. Specifies the time zone to be used when interpreting cron_schedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC. + - !ruby/object:Api::Type::String + name: 'nextScheduledTime' + description: | + Output only. The time when the next backups execution is scheduled to start. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder containing backup files will be stored below it. + - !ruby/object:Api::Type::NestedObject + name: 'latestBackup' + description: | + The details of the latest scheduled backup. + properties: + - !ruby/object:Api::Type::String + name: 'backupId' + description: | + Output only. The ID of an in-progress scheduled backup. Empty if no backup is in progress. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the backup was started. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the backup. 
+ values: + - :STATE_UNSPECIFIED + - :IN_PROGRESS + - :SUCCEEDED + - :FAILED + - !ruby/object:Api::Type::String + name: 'duration' + description: | + Output only. The duration of the backup completion. + - !ruby/object:Api::Type::Boolean + name: 'deletionProtection' + description: | + Optional. Indicates if the dataproc metastore should be protected against accidental deletions. + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the backup. + - !ruby/object:Api::Type::Array + name: 'restoringServices' + description: | + Output only. Services that are restoring from the backup. + item_type: Api::Type::String + + + + + - !ruby/object:Api::Resource + name: ProjectLocationServiceBackup + base_url: '{{+parent}}/backups' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/metastore/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The details of a backup resource. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The relative resource name of the backup, in the following form:projects/{project_number}/locations/{location_id}/services/{service_id}/backups/{backup_id} + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the backup was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the backup finished creating. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. 
The current state of the backup. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :DELETING + - :ACTIVE + - :FAILED + - :RESTORING + - !ruby/object:Api::Type::NestedObject + name: 'serviceRevision' + description: | + A managed metastore service that serves metadata queries. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'hiveMetastoreConfig' + description: | + Specifies configuration information specific to running Hive metastore software as the metastore service. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + Immutable. The Hive metastore schema version. + - !ruby/object:Api::Type::NestedObject + name: 'configOverrides' + description: | + A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden). These overrides are also applied to auxiliary versions and can be further customized in the auxiliary version's AuxiliaryVersionConfig. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'kerberosConfig' + description: | + Configuration information for a Kerberos principal. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'keytab' + description: | + A securely stored value. + properties: + - !ruby/object:Api::Type::String + name: 'cloudSecret' + description: | + The relative resource name of a Secret Manager secret version, in the following form:projects/{project_number}/secrets/{secret_id}/versions/{version_id}. + - !ruby/object:Api::Type::String + name: 'principal' + description: | + A Kerberos principal that exists in the both the keytab the KDC to authenticate as. A typical principal is of the form primary/instance@REALM, but there is no exact format. 
+ - !ruby/object:Api::Type::String + name: 'krb5ConfigGcsUri' + description: | + A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly. + - !ruby/object:Api::Type::Enum + name: 'endpointProtocol' + description: | + The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. + values: + - :ENDPOINT_PROTOCOL_UNSPECIFIED + - :THRIFT + - :GRPC + - !ruby/object:Api::Type::NestedObject + name: 'auxiliaryVersions' + description: | + A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z([-a-z0-9]*[a-z0-9])?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Configuration information for the auxiliary service versions. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The relative resource name of the metastore service, in the following format:projects/{project_number}/locations/{location_id}/services/{service_id}. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The time when the metastore service was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time when the metastore service was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + User-defined labels for the metastore service. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'network' + description: | + Immutable. The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:projects/{project_number}/global/networks/{network_id}. + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + The TCP port at which the metastore service is reached. Default: 9083. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the metastore service. + values: + - :STATE_UNSPECIFIED + - :CREATING + - :ACTIVE + - :SUSPENDING + - :SUSPENDED + - :UPDATING + - :DELETING + - :ERROR + - :MIGRATING + - !ruby/object:Api::Type::String + name: 'stateMessage' + description: | + Output only. Additional information about the current state of the metastore service, if available. + - !ruby/object:Api::Type::String + name: 'artifactGcsUri' + description: | + Output only. A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored. + - !ruby/object:Api::Type::Enum + name: 'tier' + description: | + The tier of the service. + values: + - :TIER_UNSPECIFIED + - :DEVELOPER + - :ENTERPRISE + - !ruby/object:Api::Type::NestedObject + name: 'metadataIntegration' + description: | + Specifies how metastore metadata should be integrated with external services. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dataCatalogConfig' + description: | + Specifies how metastore metadata should be integrated with the Data Catalog service. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the metastore metadata should be synced to Data Catalog. 
The default value is to disable syncing metastore metadata to Data Catalog. + - !ruby/object:Api::Type::NestedObject + name: 'dataplexConfig' + description: | + Specifies how metastore metadata should be integrated with the Dataplex service. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'lakeResources' + description: | + A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Represents a Lake resource + - !ruby/object:Api::Type::NestedObject + name: 'maintenanceWindow' + description: | + Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. + properties: + - !ruby/object:Api::Type::Integer + name: 'hourOfDay' + description: | + The hour of day (0-23) when the window starts. + - !ruby/object:Api::Type::Enum + name: 'dayOfWeek' + description: | + The day of week, when the window starts. + values: + - :DAY_OF_WEEK_UNSPECIFIED + - :MONDAY + - :TUESDAY + - :WEDNESDAY + - :THURSDAY + - :FRIDAY + - :SATURDAY + - :SUNDAY + - !ruby/object:Api::Type::String + name: 'uid' + description: | + Output only. The globally unique resource identifier of the metastore service. + - !ruby/object:Api::Type::NestedObject + name: 'metadataManagementActivity' + description: | + The metadata management activities of the metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'metadataExports' + description: | + Output only. The latest metadata exports of the metastore service. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'destinationGcsUri' + description: | + Output only. A Cloud Storage URI of a folder that metadata are exported to, in the form of gs:////, where is automatically generated. 
+ - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the export started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the export ended. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the export. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::Enum + name: 'databaseDumpType' + description: | + Output only. The type of the database dump. + values: + - :TYPE_UNSPECIFIED + - :MYSQL + - :AVRO + - !ruby/object:Api::Type::Array + name: 'restores' + description: | + Output only. The latest restores of the metastore service. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the restore started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. The time when the restore ended. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the restore. + values: + - :STATE_UNSPECIFIED + - :RUNNING + - :SUCCEEDED + - :FAILED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'backup' + description: | + Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Output only. The type of restore. + values: + - :RESTORE_TYPE_UNSPECIFIED + - :FULL + - :METADATA_ONLY + - !ruby/object:Api::Type::String + name: 'details' + description: | + Output only. The restore details containing the revision of the service to be restored to, in format of JSON. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. 
A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs://<bucket_name>/<path_inside_bucket>. + - !ruby/object:Api::Type::Enum + name: 'releaseChannel' + description: | + Immutable. The release channel of the service. If unspecified, defaults to STABLE. + values: + - :RELEASE_CHANNEL_UNSPECIFIED + - :CANARY + - :STABLE + - !ruby/object:Api::Type::NestedObject + name: 'encryptionConfig' + description: | + Encryption settings for the service. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + The fully qualified customer provided Cloud KMS key name to use for customer data encryption, in the following format:projects/{project_number}/locations/{location_id}/keyRings/{key_ring_id}/cryptoKeys/{crypto_key_id}. + - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Network configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Array + name: 'consumers' + description: | + Immutable. The consumer-side network configuration for the Dataproc Metastore instance. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Immutable. The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form:projects/{project_number}/regions/{region_id}/subnetworks/{subnetwork_id} + - !ruby/object:Api::Type::String + name: 'endpointUri' + description: | + Output only. The URI of the endpoint used to access the metastore service. + - !ruby/object:Api::Type::String + name: 'endpointLocation' + description: | + Output only. The location of the endpoint URI. Format: projects/{project}/locations/{location}. 
+ - !ruby/object:Api::Type::Boolean + name: 'customRoutesEnabled' + description: | + Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network. + - !ruby/object:Api::Type::Enum + name: 'databaseType' + description: | + Immutable. The database type that the Metastore service stores its data. + values: + - :DATABASE_TYPE_UNSPECIFIED + - :MYSQL + - :SPANNER + - !ruby/object:Api::Type::NestedObject + name: 'telemetryConfig' + description: | + Telemetry Configuration for the Dataproc Metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'logFormat' + description: | + The output format of the Dataproc Metastore service's logs. + values: + - :LOG_FORMAT_UNSPECIFIED + - :LEGACY + - :JSON + - !ruby/object:Api::Type::NestedObject + name: 'scalingConfig' + description: | + Represents the scaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Enum + name: 'instanceSize' + description: | + An enum of readable instance sizes, with each instance size mapping to a float value (e.g. InstanceSize.EXTRA_SMALL = scaling_factor(0.1)) + values: + - :INSTANCE_SIZE_UNSPECIFIED + - :EXTRA_SMALL + - :SMALL + - :MEDIUM + - :LARGE + - :EXTRA_LARGE + - !ruby/object:Api::Type::Integer + name: 'scalingFactor' + description: | + Scaling factor, increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0. + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + Represents the autoscaling configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'autoscalingFactor' + description: | + Output only. The scaling factor of a service with autoscaling enabled. + - !ruby/object:Api::Type::Boolean + name: 'autoscalingEnabled' + description: | + Optional. Whether or not autoscaling is enabled for this service. 
+ - !ruby/object:Api::Type::NestedObject + name: 'limitConfig' + description: | + Represents the autoscaling limit configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxScalingFactor' + description: | + Optional. The highest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::Integer + name: 'minScalingFactor' + description: | + Optional. The lowest scaling factor that the service should be autoscaled to. + - !ruby/object:Api::Type::NestedObject + name: 'scheduledBackup' + description: | + This specifies the configuration of scheduled backup. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + Optional. Defines whether the scheduled backup is enabled. The default value is false. + - !ruby/object:Api::Type::String + name: 'cronSchedule' + description: | + Optional. The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups. + - !ruby/object:Api::Type::String + name: 'timeZone' + description: | + Optional. Specifies the time zone to be used when interpreting cron_schedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC. + - !ruby/object:Api::Type::String + name: 'nextScheduledTime' + description: | + Output only. The time when the next backups execution is scheduled to start. + - !ruby/object:Api::Type::String + name: 'backupLocation' + description: | + Optional. A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder containing backup files will be stored below it. + - !ruby/object:Api::Type::NestedObject + name: 'latestBackup' + description: | + The details of the latest scheduled backup. 
+ properties: + - !ruby/object:Api::Type::String + name: 'backupId' + description: | + Output only. The ID of an in-progress scheduled backup. Empty if no backup is in progress. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. The time when the backup was started. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The current state of the backup. + values: + - :STATE_UNSPECIFIED + - :IN_PROGRESS + - :SUCCEEDED + - :FAILED + - !ruby/object:Api::Type::String + name: 'duration' + description: | + Output only. The duration of the backup completion. + - !ruby/object:Api::Type::Boolean + name: 'deletionProtection' + description: | + Optional. Indicates if the dataproc metastore should be protected against accidental deletions. + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the backup. + - !ruby/object:Api::Type::Array + name: 'restoringServices' + description: | + Output only. Services that are restoring from the backup. + item_type: Api::Type::String + diff --git a/mmv1/products/metastore/inspec.yaml b/mmv1/products/metastore/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/metastore/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/monitoring/api.yaml b/mmv1/products/monitoring/api.yaml index c9b20771b..0a0facd92 100644 --- a/mmv1/products/monitoring/api.yaml +++ b/mmv1/products/monitoring/api.yaml @@ -2114,3 +2114,109 @@ objects: This field allows time series to be associated with the intersection of this metric type and the monitored resource types in this list. item_type: Api::Type::String + + + + - !ruby/object:Api::Resource + name: ProjectGroup + base_url: '{{+name}}/groups' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/monitoring/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The description of a dynamic collection of monitored resources. Each group has a filter that is matched against monitored resources and their associated metadata. If a group's filter matches an available monitored resource, then that resource is a member of that group. Groups can contain any number of monitored resources, and each monitored resource can be a member of any number of groups.Groups can be nested in parent-child hierarchies. The parentName field identifies an optional parent for each group. If a group has a parent, then the only monitored resources available to be matched by the group's filter are the resources contained in the parent group. In other words, a group contains the monitored resources that match its filter and the filters of all the group's ancestors. 
A group without a parent can contain any monitored resource.For example, consider an infrastructure running a set of instances with two user-defined tags: "environment" and "role". A parent group has a filter, environment="production". A child of that parent group has a filter, role="transcoder". The parent group contains all instances in the production environment, regardless of their roles. The child group contains instances that have the transcoder role and are in the production environment.The monitored resources contained in a group can change at any moment, depending on what resources exist and what filters are associated with the group and its ancestors. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The name of this group. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + A user-assigned name for this group, used only for display purposes. + - !ruby/object:Api::Type::String + name: 'parentName' + description: | + The name of the group's parent, if it has one. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] For groups with no parent, parent_name is the empty string, "". + - !ruby/object:Api::Type::String + name: 'filter' + description: | + The filter used to determine which monitored resources belong to this group. + - !ruby/object:Api::Type::Boolean + name: 'isCluster' + description: | + If true, the members of this group are considered to be a cluster. The system can perform additional analysis on groups that are clusters. 
+ + + + + - !ruby/object:Api::Resource + name: ProjectGroup + base_url: '{{+name}}/groups' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/monitoring/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The description of a dynamic collection of monitored resources. Each group has a filter that is matched against monitored resources and their associated metadata. If a group's filter matches an available monitored resource, then that resource is a member of that group. Groups can contain any number of monitored resources, and each monitored resource can be a member of any number of groups.Groups can be nested in parent-child hierarchies. The parentName field identifies an optional parent for each group. If a group has a parent, then the only monitored resources available to be matched by the group's filter are the resources contained in the parent group. In other words, a group contains the monitored resources that match its filter and the filters of all the group's ancestors. A group without a parent can contain any monitored resource.For example, consider an infrastructure running a set of instances with two user-defined tags: "environment" and "role". A parent group has a filter, environment="production". A child of that parent group has a filter, role="transcoder". The parent group contains all instances in the production environment, regardless of their roles. 
The child group contains instances that have the transcoder role and are in the production environment.The monitored resources contained in a group can change at any moment, depending on what resources exist and what filters are associated with the group and its ancestors. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The name of this group. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + A user-assigned name for this group, used only for display purposes. + - !ruby/object:Api::Type::String + name: 'parentName' + description: | + The name of the group's parent, if it has one. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] For groups with no parent, parent_name is the empty string, "". + - !ruby/object:Api::Type::String + name: 'filter' + description: | + The filter used to determine which monitored resources belong to this group. + - !ruby/object:Api::Type::Boolean + name: 'isCluster' + description: | + If true, the members of this group are considered to be a cluster. The system can perform additional analysis on groups that are clusters. + diff --git a/mmv1/products/monitoring/inspec.yaml b/mmv1/products/monitoring/inspec.yaml index c7f0d95ff..0ce81ef7e 100644 --- a/mmv1/products/monitoring/inspec.yaml +++ b/mmv1/products/monitoring/inspec.yaml @@ -12,7 +12,6 @@ # limitations under the License. 
--- !ruby/object:Provider::Inspec::Config -legacy_name: project overrides: !ruby/object:Overrides::ResourceOverrides AlertPolicy: !ruby/object:Overrides::Inspec::ResourceOverride additional_functions: third_party/inspec/custom_functions/alert_policy.erb diff --git a/mmv1/products/orgpolicy/api.yaml b/mmv1/products/orgpolicy/api.yaml new file mode 100644 index 000000000..a9c20e5e6 --- /dev/null +++ b/mmv1/products/orgpolicy/api.yaml @@ -0,0 +1,920 @@ + +--- !ruby/object:Api::Product +name: orgpolicy +display_name: orgpolicy +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://orgpolicy.googleapis.com/v2/ +scopes: + - https://www.googleapis.com/auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://orgpolicy.googleapis.com/ + url: https://console.cloud.google.com/apis/library/orgpolicy.googleapis.com/ +objects: + + - !ruby/object:Api::Resource + name: FolderPolicy + base_url: 'v2/{{parent}}/policies' + self_link: 'v2/{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/orgpolicy/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Defines an organization policy which is used to specify constraints for configurations of Google Cloud resources. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'dryRunSpec' + description: | + Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. 
+ properties: + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. + - !ruby/object:Api::Type::Array + name: 'rules' + description: | + In policies for boolean constraints, the following requirements apply: - There must be one and only one policy rule where condition is unset. - Boolean policy rules with conditions must set `enforced` to the opposite of the policy rule without a condition. - During policy evaluation, policy rules with conditions that are true for a target resource take precedence. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'condition' + description: | + Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. 
+ properties: + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::Boolean + name: 'denyAll' + description: | + Setting this to true means that all values are denied. This field can be set only in policies for list constraints. + - !ruby/object:Api::Type::Boolean + name: 'allowAll' + description: | + Setting this to true means that all values are allowed. This field can be set only in policies for list constraints. + - !ruby/object:Api::Type::Boolean + name: 'enforce' + description: | + If `true`, then the policy is enforced. If `false`, then any configuration is acceptable. This field can be set only in policies for boolean constraints. + - !ruby/object:Api::Type::NestedObject + name: 'values' + description: | + A message that holds specific allowed and denied values. This message can define specific values and subtrees of the Resource Manager resource hierarchy (`Organizations`, `Folders`, `Projects`) that are allowed or denied. This is achieved by using the `under:` and optional `is:` prefixes. The `under:` prefix is used to denote resource subtree values. The `is:` prefix is used to denote specific values, and is required only if the value contains a ":". Values prefixed with "is:" are treated the same as values with no prefix. 
Ancestry subtrees must be in one of the following formats: - `projects/` (for example, `projects/tokyo-rain-123`) - `folders/` (for example, `folders/1234`) - `organizations/` (for example, `organizations/1234`) The `supports_under` field of the associated `Constraint` defines whether ancestry prefixes can be used. + properties: + - !ruby/object:Api::Type::Array + name: 'deniedValues' + description: | + List of values denied at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'allowedValues' + description: | + List of values allowed at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. + - !ruby/object:Api::Type::Boolean + name: 'reset' + description: | + Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific constraint at this resource. This field can be set in policies for either list or boolean constraints. If set, `rules` must be empty and `inherit_from_parent` must be set to false. + - !ruby/object:Api::Type::Boolean + name: 'inheritFromParent' + description: | + Determines the inheritance behavior for this policy. If `inherit_from_parent` is true, policy rules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this policy becomes the new root for evaluation. This field can be set only for policies which configure list constraints. 
+ - !ruby/object:Api::Type::NestedObject + name: 'spec' + description: | + Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. + properties: + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. + - !ruby/object:Api::Type::Array + name: 'rules' + description: | + In policies for boolean constraints, the following requirements apply: - There must be one and only one policy rule where condition is unset. - Boolean policy rules with conditions must set `enforced` to the opposite of the policy rule without a condition. - During policy evaluation, policy rules with conditions that are true for a target resource take precedence. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'condition' + description: | + Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." 
expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. + properties: + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::Boolean + name: 'denyAll' + description: | + Setting this to true means that all values are denied. This field can be set only in policies for list constraints. + - !ruby/object:Api::Type::Boolean + name: 'allowAll' + description: | + Setting this to true means that all values are allowed. This field can be set only in policies for list constraints. + - !ruby/object:Api::Type::Boolean + name: 'enforce' + description: | + If `true`, then the policy is enforced. If `false`, then any configuration is acceptable. This field can be set only in policies for boolean constraints. + - !ruby/object:Api::Type::NestedObject + name: 'values' + description: | + A message that holds specific allowed and denied values. This message can define specific values and subtrees of the Resource Manager resource hierarchy (`Organizations`, `Folders`, `Projects`) that are allowed or denied. 
This is achieved by using the `under:` and optional `is:` prefixes. The `under:` prefix is used to denote resource subtree values. The `is:` prefix is used to denote specific values, and is required only if the value contains a ":". Values prefixed with "is:" are treated the same as values with no prefix. Ancestry subtrees must be in one of the following formats: - `projects/` (for example, `projects/tokyo-rain-123`) - `folders/` (for example, `folders/1234`) - `organizations/` (for example, `organizations/1234`) The `supports_under` field of the associated `Constraint` defines whether ancestry prefixes can be used. + properties: + - !ruby/object:Api::Type::Array + name: 'deniedValues' + description: | + List of values denied at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'allowedValues' + description: | + List of values allowed at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. + - !ruby/object:Api::Type::Boolean + name: 'reset' + description: | + Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific constraint at this resource. This field can be set in policies for either list or boolean constraints. If set, `rules` must be empty and `inherit_from_parent` must be set to false. + - !ruby/object:Api::Type::Boolean + name: 'inheritFromParent' + description: | + Determines the inheritance behavior for this policy. 
If `inherit_from_parent` is true, policy rules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this policy becomes the new root for evaluation. This field can be set only for policies which configure list constraints. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. + - !ruby/object:Api::Type::NestedObject + name: 'alternate' + description: | + Similar to PolicySpec but with an extra 'launch' field for launch reference. The PolicySpec here is specific for dry-run/darklaunch. + properties: + - !ruby/object:Api::Type::String + name: 'launch' + description: | + Reference to the launch that will be used while audit logging and to control the launch. Should be set only in the alternate policy. + - !ruby/object:Api::Type::NestedObject + name: 'spec' + description: | + Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. + properties: + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. 
+ - !ruby/object:Api::Type::Array + name: 'rules' + description: | + In policies for boolean constraints, the following requirements apply: - There must be one and only one policy rule where condition is unset. - Boolean policy rules with conditions must set `enforced` to the opposite of the policy rule without a condition. - During policy evaluation, policy rules with conditions that are true for a target resource take precedence. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'condition' + description: | + Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. + properties: + - !ruby/object:Api::Type::String + name: 'title' + description: | + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'location' + description: | + Optional. 
String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::String + name: 'expression' + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::Boolean + name: 'denyAll' + description: | + Setting this to true means that all values are denied. This field can be set only in policies for list constraints. + - !ruby/object:Api::Type::Boolean + name: 'allowAll' + description: | + Setting this to true means that all values are allowed. This field can be set only in policies for list constraints. + - !ruby/object:Api::Type::Boolean + name: 'enforce' + description: | + If `true`, then the policy is enforced. If `false`, then any configuration is acceptable. This field can be set only in policies for boolean constraints. + - !ruby/object:Api::Type::NestedObject + name: 'values' + description: | + A message that holds specific allowed and denied values. This message can define specific values and subtrees of the Resource Manager resource hierarchy (`Organizations`, `Folders`, `Projects`) that are allowed or denied. This is achieved by using the `under:` and optional `is:` prefixes. The `under:` prefix is used to denote resource subtree values. The `is:` prefix is used to denote specific values, and is required only if the value contains a ":". Values prefixed with "is:" are treated the same as values with no prefix. 
Ancestry subtrees must be in one of the following formats: - `projects/` (for example, `projects/tokyo-rain-123`) - `folders/` (for example, `folders/1234`) - `organizations/` (for example, `organizations/1234`) The `supports_under` field of the associated `Constraint` defines whether ancestry prefixes can be used. + properties: + - !ruby/object:Api::Type::Array + name: 'deniedValues' + description: | + List of values denied at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'allowedValues' + description: | + List of values allowed at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. + - !ruby/object:Api::Type::Boolean + name: 'reset' + description: | + Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific constraint at this resource. This field can be set in policies for either list or boolean constraints. If set, `rules` must be empty and `inherit_from_parent` must be set to false. + - !ruby/object:Api::Type::Boolean + name: 'inheritFromParent' + description: | + Determines the inheritance behavior for this policy. If `inherit_from_parent` is true, policy rules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this policy becomes the new root for evaluation. 
This field can be set only for policies which configure list constraints + + + - !ruby/object:Api::Resource + name: OrganizationConstraint + base_url: '{{+parent}}/constraints' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/orgpolicy/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The response returned from the ListConstraints method. + properties: + + - !ruby/object:Api::Type::String + name: 'nextPageToken' + description: | + Page token used to retrieve the next page. This is currently not used. + - !ruby/object:Api::Type::Array + name: 'constraints' + description: | + The collection of constraints that are available on the targeted resource. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human readable name. Mutable. + - !ruby/object:Api::Type::NestedObject + name: 'googleManagedConstraint' + description: | + A Google managed constraint. This represents a subset of fields missing from Constraint proto that are required to describe CustomConstraint + properties: + - !ruby/object:Api::Type::Enum + name: 'actionType' + description: | + Allow or deny type. + values: + - :ACTION_TYPE_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + description: | + The resource instance type on which this policy applies. Format will be of the form : `/` Example: * `compute.googleapis.com/Instance`. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'condition' + description: | + Org policy condition/expression. For example: `resource.instanceName.matches("[production|test]_.*_(\d)+")` or, `resource.management.auto_upgrade == true` The max length of the condition is 1000 characters. + - !ruby/object:Api::Type::Array + name: 'methodTypes' + description: | + All the operations being applied for this constraint. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Detailed description of what this constraint controls as well as how and where it is enforced. Mutable. + - !ruby/object:Api::Type::Enum + name: 'constraintDefault' + description: | + The evaluation behavior of this constraint in the absence of a policy. + values: + - :CONSTRAINT_DEFAULT_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Boolean + name: 'supportsDryRun' + description: | + Shows if dry run is supported for this constraint or not. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the constraint. Must be in one of the following forms: * `projects/{project_number}/constraints/{constraint_name}` * `folders/{folder_id}/constraints/{constraint_name}` * `organizations/{organization_id}/constraints/{constraint_name}` For example, "/projects/123/constraints/compute.disableSerialPortAccess". + - !ruby/object:Api::Type::NestedObject + name: 'listConstraint' + description: | + A constraint that allows or disallows a list of string values, which are configured by an Organization Policy administrator with a policy. + properties: + - !ruby/object:Api::Type::Boolean + name: 'supportsUnder' + description: | + Indicates whether subtrees of the Resource Manager resource hierarchy can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"under:folders/123"` would match any resource under the 'folders/123' folder. 
+ - !ruby/object:Api::Type::Boolean + name: 'supportsIn' + description: | + Indicates whether values grouped into categories can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"in:Python"` would match any value in the 'Python' group. + + + - !ruby/object:Api::Resource + name: ProjectConstraint + base_url: '{{+parent}}/constraints' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/orgpolicy/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The response returned from the ListConstraints method. + properties: + + - !ruby/object:Api::Type::String + name: 'nextPageToken' + description: | + Page token used to retrieve the next page. This is currently not used. + - !ruby/object:Api::Type::Array + name: 'constraints' + description: | + The collection of constraints that are available on the targeted resource. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human readable name. Mutable. + - !ruby/object:Api::Type::NestedObject + name: 'googleManagedConstraint' + description: | + A Google managed constraint. This represents a subset of fields missing from Constraint proto that are required to describe CustomConstraint + properties: + - !ruby/object:Api::Type::Enum + name: 'actionType' + description: | + Allow or deny type. 
+ values: + - :ACTION_TYPE_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + description: | + The resource instance type on which this policy applies. Format will be of the form : `/` Example: * `compute.googleapis.com/Instance`. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'condition' + description: | + Org policy condition/expression. For example: `resource.instanceName.matches("[production|test]_.*_(\d)+")` or, `resource.management.auto_upgrade == true` The max length of the condition is 1000 characters. + - !ruby/object:Api::Type::Array + name: 'methodTypes' + description: | + All the operations being applied for this constraint. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Detailed description of what this constraint controls as well as how and where it is enforced. Mutable. + - !ruby/object:Api::Type::Enum + name: 'constraintDefault' + description: | + The evaluation behavior of this constraint in the absence of a policy. + values: + - :CONSTRAINT_DEFAULT_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Boolean + name: 'supportsDryRun' + description: | + Shows if dry run is supported for this constraint or not. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the constraint. Must be in one of the following forms: * `projects/{project_number}/constraints/{constraint_name}` * `folders/{folder_id}/constraints/{constraint_name}` * `organizations/{organization_id}/constraints/{constraint_name}` For example, "/projects/123/constraints/compute.disableSerialPortAccess". + - !ruby/object:Api::Type::NestedObject + name: 'listConstraint' + description: | + A constraint that allows or disallows a list of string values, which are configured by an Organization Policy administrator with a policy. 
+ properties: + - !ruby/object:Api::Type::Boolean + name: 'supportsUnder' + description: | + Indicates whether subtrees of the Resource Manager resource hierarchy can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"under:folders/123"` would match any resource under the 'folders/123' folder. + - !ruby/object:Api::Type::Boolean + name: 'supportsIn' + description: | + Indicates whether values grouped into categories can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"in:Python"` would match any value in the 'Python' group. + + + + + + + + + - !ruby/object:Api::Resource + name: OrganizationConstraint + base_url: '{{+parent}}/constraints' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/orgpolicy/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The response returned from the ListConstraints method. + properties: + + - !ruby/object:Api::Type::String + name: 'nextPageToken' + description: | + Page token used to retrieve the next page. This is currently not used. + - !ruby/object:Api::Type::Array + name: 'constraints' + description: | + The collection of constraints that are available on the targeted resource. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human readable name. Mutable. + - !ruby/object:Api::Type::NestedObject + name: 'googleManagedConstraint' + description: | + A Google managed constraint. 
This represents a subset of fields missing from Constraint proto that are required to describe CustomConstraint + properties: + - !ruby/object:Api::Type::Enum + name: 'actionType' + description: | + Allow or deny type. + values: + - :ACTION_TYPE_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + description: | + The resource instance type on which this policy applies. Format will be of the form : `/` Example: * `compute.googleapis.com/Instance`. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'condition' + description: | + Org policy condition/expression. For example: `resource.instanceName.matches("[production|test]_.*_(\d)+")` or, `resource.management.auto_upgrade == true` The max length of the condition is 1000 characters. + - !ruby/object:Api::Type::Array + name: 'methodTypes' + description: | + All the operations being applied for this constraint. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Detailed description of what this constraint controls as well as how and where it is enforced. Mutable. + - !ruby/object:Api::Type::Enum + name: 'constraintDefault' + description: | + The evaluation behavior of this constraint in the absence of a policy. + values: + - :CONSTRAINT_DEFAULT_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Boolean + name: 'supportsDryRun' + description: | + Shows if dry run is supported for this constraint or not. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the constraint. Must be in one of the following forms: * `projects/{project_number}/constraints/{constraint_name}` * `folders/{folder_id}/constraints/{constraint_name}` * `organizations/{organization_id}/constraints/{constraint_name}` For example, "/projects/123/constraints/compute.disableSerialPortAccess". 
+ - !ruby/object:Api::Type::NestedObject + name: 'listConstraint' + description: | + A constraint that allows or disallows a list of string values, which are configured by an Organization Policy administrator with a policy. + properties: + - !ruby/object:Api::Type::Boolean + name: 'supportsUnder' + description: | + Indicates whether subtrees of the Resource Manager resource hierarchy can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"under:folders/123"` would match any resource under the 'folders/123' folder. + - !ruby/object:Api::Type::Boolean + name: 'supportsIn' + description: | + Indicates whether values grouped into categories can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"in:Python"` would match any value in the 'Python' group. + + + + + - !ruby/object:Api::Resource + name: OrganizationConstraint + base_url: '{{+parent}}/constraints' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/orgpolicy/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The response returned from the ListConstraints method. + properties: + + - !ruby/object:Api::Type::String + name: 'nextPageToken' + description: | + Page token used to retrieve the next page. This is currently not used. + - !ruby/object:Api::Type::Array + name: 'constraints' + description: | + The collection of constraints that are available on the targeted resource. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human readable name. Mutable. + - !ruby/object:Api::Type::NestedObject + name: 'googleManagedConstraint' + description: | + A Google managed constraint. This represents a subset of fields missing from Constraint proto that are required to describe CustomConstraint + properties: + - !ruby/object:Api::Type::Enum + name: 'actionType' + description: | + Allow or deny type. + values: + - :ACTION_TYPE_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + description: | + The resource instance type on which this policy applies. Format will be of the form : `/` Example: * `compute.googleapis.com/Instance`. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'condition' + description: | + Org policy condition/expression. For example: `resource.instanceName.matches("[production|test]_.*_(\d)+")` or, `resource.management.auto_upgrade == true` The max length of the condition is 1000 characters. + - !ruby/object:Api::Type::Array + name: 'methodTypes' + description: | + All the operations being applied for this constraint. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Detailed description of what this constraint controls as well as how and where it is enforced. Mutable. + - !ruby/object:Api::Type::Enum + name: 'constraintDefault' + description: | + The evaluation behavior of this constraint in the absence of a policy. + values: + - :CONSTRAINT_DEFAULT_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Boolean + name: 'supportsDryRun' + description: | + Shows if dry run is supported for this constraint or not. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the constraint. 
Must be in one of the following forms: * `projects/{project_number}/constraints/{constraint_name}` * `folders/{folder_id}/constraints/{constraint_name}` * `organizations/{organization_id}/constraints/{constraint_name}` For example, "/projects/123/constraints/compute.disableSerialPortAccess". + - !ruby/object:Api::Type::NestedObject + name: 'listConstraint' + description: | + A constraint that allows or disallows a list of string values, which are configured by an Organization Policy administrator with a policy. + properties: + - !ruby/object:Api::Type::Boolean + name: 'supportsUnder' + description: | + Indicates whether subtrees of the Resource Manager resource hierarchy can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"under:folders/123"` would match any resource under the 'folders/123' folder. + - !ruby/object:Api::Type::Boolean + name: 'supportsIn' + description: | + Indicates whether values grouped into categories can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"in:Python"` would match any value in the 'Python' group. + + + + + - !ruby/object:Api::Resource + name: FolderConstraint + base_url: '{{+parent}}/constraints' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/orgpolicy/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The response returned from the ListConstraints method. + properties: + + - !ruby/object:Api::Type::String + name: 'nextPageToken' + description: | + Page token used to retrieve the next page. 
This is currently not used. + - !ruby/object:Api::Type::Array + name: 'constraints' + description: | + The collection of constraints that are available on the targeted resource. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human readable name. Mutable. + - !ruby/object:Api::Type::NestedObject + name: 'googleManagedConstraint' + description: | + A Google managed constraint. This represents a subset of fields missing from Constraint proto that are required to describe CustomConstraint + properties: + - !ruby/object:Api::Type::Enum + name: 'actionType' + description: | + Allow or deny type. + values: + - :ACTION_TYPE_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + description: | + The resource instance type on which this policy applies. Format will be of the form : `/` Example: * `compute.googleapis.com/Instance`. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'condition' + description: | + Org policy condition/expression. For example: `resource.instanceName.matches("[production|test]_.*_(\d)+")` or, `resource.management.auto_upgrade == true` The max length of the condition is 1000 characters. + - !ruby/object:Api::Type::Array + name: 'methodTypes' + description: | + All the operations being applied for this constraint. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Detailed description of what this constraint controls as well as how and where it is enforced. Mutable. + - !ruby/object:Api::Type::Enum + name: 'constraintDefault' + description: | + The evaluation behavior of this constraint in the absence of a policy. + values: + - :CONSTRAINT_DEFAULT_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Boolean + name: 'supportsDryRun' + description: | + Shows if dry run is supported for this constraint or not. 
+ - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the constraint. Must be in one of the following forms: * `projects/{project_number}/constraints/{constraint_name}` * `folders/{folder_id}/constraints/{constraint_name}` * `organizations/{organization_id}/constraints/{constraint_name}` For example, "/projects/123/constraints/compute.disableSerialPortAccess". + - !ruby/object:Api::Type::NestedObject + name: 'listConstraint' + description: | + A constraint that allows or disallows a list of string values, which are configured by an Organization Policy administrator with a policy. + properties: + - !ruby/object:Api::Type::Boolean + name: 'supportsUnder' + description: | + Indicates whether subtrees of the Resource Manager resource hierarchy can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"under:folders/123"` would match any resource under the 'folders/123' folder. + - !ruby/object:Api::Type::Boolean + name: 'supportsIn' + description: | + Indicates whether values grouped into categories can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"in:Python"` would match any value in the 'Python' group. + + + + + - !ruby/object:Api::Resource + name: FolderConstraint + base_url: '{{+parent}}/constraints' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/orgpolicy/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The response returned from the ListConstraints method. 
+ properties: + + - !ruby/object:Api::Type::String + name: 'nextPageToken' + description: | + Page token used to retrieve the next page. This is currently not used. + - !ruby/object:Api::Type::Array + name: 'constraints' + description: | + The collection of constraints that are available on the targeted resource. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human readable name. Mutable. + - !ruby/object:Api::Type::NestedObject + name: 'googleManagedConstraint' + description: | + A Google managed constraint. This represents a subset of fields missing from Constraint proto that are required to describe CustomConstraint + properties: + - !ruby/object:Api::Type::Enum + name: 'actionType' + description: | + Allow or deny type. + values: + - :ACTION_TYPE_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + description: | + The resource instance type on which this policy applies. Format will be of the form : `/` Example: * `compute.googleapis.com/Instance`. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'condition' + description: | + Org policy condition/expression. For example: `resource.instanceName.matches("[production|test]_.*_(\d)+")` or, `resource.management.auto_upgrade == true` The max length of the condition is 1000 characters. + - !ruby/object:Api::Type::Array + name: 'methodTypes' + description: | + All the operations being applied for this constraint. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'description' + description: | + Detailed description of what this constraint controls as well as how and where it is enforced. Mutable. + - !ruby/object:Api::Type::Enum + name: 'constraintDefault' + description: | + The evaluation behavior of this constraint in the absence of a policy. 
+ values: + - :CONSTRAINT_DEFAULT_UNSPECIFIED + - :ALLOW + - :DENY + - !ruby/object:Api::Type::Boolean + name: 'supportsDryRun' + description: | + Shows if dry run is supported for this constraint or not. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the constraint. Must be in one of the following forms: * `projects/{project_number}/constraints/{constraint_name}` * `folders/{folder_id}/constraints/{constraint_name}` * `organizations/{organization_id}/constraints/{constraint_name}` For example, "/projects/123/constraints/compute.disableSerialPortAccess". + - !ruby/object:Api::Type::NestedObject + name: 'listConstraint' + description: | + A constraint that allows or disallows a list of string values, which are configured by an Organization Policy administrator with a policy. + properties: + - !ruby/object:Api::Type::Boolean + name: 'supportsUnder' + description: | + Indicates whether subtrees of the Resource Manager resource hierarchy can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"under:folders/123"` would match any resource under the 'folders/123' folder. + - !ruby/object:Api::Type::Boolean + name: 'supportsIn' + description: | + Indicates whether values grouped into categories can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `"in:Python"` would match any value in the 'Python' group. + diff --git a/mmv1/products/orgpolicy/inspec.yaml b/mmv1/products/orgpolicy/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/orgpolicy/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/run/api.yaml b/mmv1/products/run/api.yaml new file mode 100644 index 000000000..515e7c78b --- /dev/null +++ b/mmv1/products/run/api.yaml @@ -0,0 +1,896 @@ + +--- !ruby/object:Api::Product +name: Run +display_name: run +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://run.googleapis.com/v2/ +scopes: + - https://run.googleapis.com//auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://run.googleapis.com/ + url: https://console.cloud.google.com/apis/library/run.googleapis.com/ +objects: + + + + - !ruby/object:Api::Resource + name: Service + base_url: '{{+parent}}/services' + self_link: '{{+name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/run/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Service acts as a top-level container that manages a set of configurations and revision templates which implement a network service. 
Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + The fully qualified name of this Service. In CreateServiceRequest, this field is ignored, and instead composed from CreateServiceRequest.parent and CreateServiceRequest.service_id. Format: projects/{project}/locations/{location}/services/{service_id} + - !ruby/object:Api::Type::String + name: 'description' + description: | + User-provided description of the Service. This field currently has a 512-character limit. + - !ruby/object:Api::Type::String + name: 'uid' + description: | + Output only. Server assigned unique identifier for the trigger. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted. + - !ruby/object:Api::Type::String + name: 'generation' + description: | + Output only. A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'annotations' + description: | + Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected in new resources. All system annotations in v1 now have a corresponding field in v2 Service. This field follows Kubernetes annotations' namespacing, limits, and rules. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. The creation time. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. The last-modified time. + - !ruby/object:Api::Type::String + name: 'deleteTime' + description: | + Output only. The deletion time. It is only populated as a response to a Delete request. + - !ruby/object:Api::Type::String + name: 'expireTime' + description: | + Output only. For a deleted resource, the time after which it will be permamently deleted. + - !ruby/object:Api::Type::String + name: 'creator' + description: | + Output only. Email address of the authenticated creator. + - !ruby/object:Api::Type::String + name: 'lastModifier' + description: | + Output only. Email address of the last authenticated modifier. + - !ruby/object:Api::Type::String + name: 'client' + description: | + Arbitrary identifier for the API client. + - !ruby/object:Api::Type::String + name: 'clientVersion' + description: | + Arbitrary version identifier for the API client. + - !ruby/object:Api::Type::Enum + name: 'ingress' + description: | + Optional. Provides the ingress settings for this Service. 
On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. + values: + - :INGRESS_TRAFFIC_UNSPECIFIED + - :INGRESS_TRAFFIC_ALL + - :INGRESS_TRAFFIC_INTERNAL_ONLY + - :INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER + - :INGRESS_TRAFFIC_NONE + - !ruby/object:Api::Type::Enum + name: 'launchStage' + description: | + Optional. The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output. + values: + - :LAUNCH_STAGE_UNSPECIFIED + - :UNIMPLEMENTED + - :PRELAUNCH + - :EARLY_ACCESS + - :ALPHA + - :BETA + - :GA + - :DEPRECATED + - !ruby/object:Api::Type::NestedObject + name: 'binaryAuthorization' + description: | + Settings for Binary Authorization feature. + properties: + - !ruby/object:Api::Type::Boolean + name: 'useDefault' + description: | + Optional. If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + - !ruby/object:Api::Type::String + name: 'policy' + description: | + Optional. The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} + - !ruby/object:Api::Type::String + name: 'breakglassJustification' + description: | + Optional. If present, indicates to use Breakglass using this justification. If use_default is False, then it must be empty. 
For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass + - !ruby/object:Api::Type::NestedObject + name: 'template' + description: | + RevisionTemplate describes the data a revision should have when created from a template. + properties: + - !ruby/object:Api::Type::String + name: 'revision' + description: | + Optional. The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 RevisionTemplate. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'annotations' + description: | + Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'scaling' + description: | + Settings for revision-level scaling settings. + properties: + - !ruby/object:Api::Type::Integer + name: 'minInstanceCount' + description: | + Optional. Minimum number of serving instances that this resource should have. + - !ruby/object:Api::Type::Integer + name: 'maxInstanceCount' + description: | + Optional. Maximum number of serving instances that this resource should have. + - !ruby/object:Api::Type::NestedObject + name: 'vpcAccess' + description: | + VPC Access settings. For more information on sending traffic to a VPC network, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. + properties: + - !ruby/object:Api::Type::String + name: 'connector' + description: | + VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. For more information on sending traffic to a VPC network via a connector, visit https://cloud.google.com/run/docs/configuring/vpc-connectors. + - !ruby/object:Api::Type::Enum + name: 'egress' + description: | + Optional. Traffic VPC egress settings. If not provided, it defaults to PRIVATE_RANGES_ONLY. + values: + - :VPC_EGRESS_UNSPECIFIED + - :ALL_TRAFFIC + - :PRIVATE_RANGES_ONLY + - !ruby/object:Api::Type::Array + name: 'networkInterfaces' + description: | + Optional. Direct VPC egress settings. Currently only single network interface is supported. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. 
If network is not specified, it will be looked up from the subnetwork. + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: | + Optional. The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the subnetwork with the same name with the network will be used. + - !ruby/object:Api::Type::Array + name: 'tags' + description: | + Optional. Network tags applied to this Cloud Run resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'timeout' + description: | + Optional. Max allowed time for an instance to respond to a request. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Optional. Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. + - !ruby/object:Api::Type::Array + name: 'containers' + description: | + Holds the single container that defines the unit of execution for this Revision. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the container specified as a DNS_LABEL (RFC 1123). + - !ruby/object:Api::Type::String + name: 'image' + description: | + Required. Name of the container image in Dockerhub, Google Artifact Registry, or Google Container Registry. If the host is not provided, Dockerhub is assumed. + - !ruby/object:Api::Type::Array + name: 'command' + description: | + Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'env' + description: | + List of environment variables to set in the container. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must not exceed 32768 characters. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes. + - !ruby/object:Api::Type::NestedObject + name: 'valueSource' + description: | + EnvVarSource represents a source for the value of an EnvVar. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'secretKeyRef' + description: | + SecretEnvVarSource represents a source for the value of an EnvVar. + properties: + - !ruby/object:Api::Type::String + name: 'secret' + description: | + Required. The name of the secret in Cloud Secret Manager. Format: {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project. + - !ruby/object:Api::Type::String + name: 'version' + description: | + The Cloud Secret Manager secret version. Can be 'latest' for the latest version, an integer for a specific version, or a version alias. 
+ - !ruby/object:Api::Type::NestedObject + name: 'resources' + description: | + ResourceRequirements describes the compute resource requirements. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'limits' + description: | + Only `memory` and `cpu` keys in the map are supported. Notes: * The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. For more information, go to https://cloud.google.com/run/docs/configuring/cpu. * For supported 'memory' values and syntax, go to https://cloud.google.com/run/docs/configuring/memory-limits + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Boolean + name: 'cpuIdle' + description: | + Determines whether CPU is only allocated during requests (true by default). However, if ResourceRequirements is set, the caller must explicitly set this field to true to preserve the default behavior. + - !ruby/object:Api::Type::Boolean + name: 'startupCpuBoost' + description: | + Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency. + - !ruby/object:Api::Type::Array + name: 'ports' + description: | + List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + - !ruby/object:Api::Type::Integer + name: 'containerPort' + description: | + Port number the container listens on. 
This must be a valid TCP port number, 0 < container_port < 65536. + - !ruby/object:Api::Type::Array + name: 'volumeMounts' + description: | + Volume to mount into the container's filesystem. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. This must match the Name of a Volume. + - !ruby/object:Api::Type::String + name: 'mountPath' + description: | + Required. Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be `/cloudsql`. All instances defined in the Volume will be available as `/cloudsql/[instance]`. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run + - !ruby/object:Api::Type::String + name: 'workingDir' + description: | + Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. + - !ruby/object:Api::Type::NestedObject + name: 'livenessProbe' + description: | + Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + properties: + - !ruby/object:Api::Type::Integer + name: 'initialDelaySeconds' + description: | + Optional. Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. + - !ruby/object:Api::Type::Integer + name: 'timeoutSeconds' + description: | + Optional. Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds. + - !ruby/object:Api::Type::Integer + name: 'periodSeconds' + description: | + Optional. How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeout_seconds. + - !ruby/object:Api::Type::Integer + name: 'failureThreshold' + description: | + Optional. Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + - !ruby/object:Api::Type::NestedObject + name: 'httpGet' + description: | + HTTPGetAction describes an action based on HTTP Get requests. + properties: + - !ruby/object:Api::Type::String + name: 'path' + description: | + Optional. Path to access on the HTTP server. Defaults to '/'. + - !ruby/object:Api::Type::Array + name: 'httpHeaders' + description: | + Optional. Custom headers to set in the request. HTTP allows repeated headers. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. The header field name + - !ruby/object:Api::Type::String + name: 'value' + description: | + Optional. The header field value + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. + - !ruby/object:Api::Type::NestedObject + name: 'tcpSocket' + description: | + TCPSocketAction describes an action based on opening a socket + properties: + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. + - !ruby/object:Api::Type::NestedObject + name: 'grpc' + description: | + GRPCAction describes an action involving a GRPC port. + properties: + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + Optional. 
Port number of the gRPC service. Number must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. + - !ruby/object:Api::Type::String + name: 'service' + description: | + Optional. Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md ). If this is not specified, the default behavior is defined by gRPC. + - !ruby/object:Api::Type::NestedObject + name: 'startupProbe' + description: | + Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + properties: + - !ruby/object:Api::Type::Integer + name: 'initialDelaySeconds' + description: | + Optional. Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. + - !ruby/object:Api::Type::Integer + name: 'timeoutSeconds' + description: | + Optional. Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds. + - !ruby/object:Api::Type::Integer + name: 'periodSeconds' + description: | + Optional. How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeout_seconds. + - !ruby/object:Api::Type::Integer + name: 'failureThreshold' + description: | + Optional. Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + - !ruby/object:Api::Type::NestedObject + name: 'httpGet' + description: | + HTTPGetAction describes an action based on HTTP Get requests. 
+ properties: + - !ruby/object:Api::Type::String + name: 'path' + description: | + Optional. Path to access on the HTTP server. Defaults to '/'. + - !ruby/object:Api::Type::Array + name: 'httpHeaders' + description: | + Optional. Custom headers to set in the request. HTTP allows repeated headers. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. The header field name + - !ruby/object:Api::Type::String + name: 'value' + description: | + Optional. The header field value + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. + - !ruby/object:Api::Type::NestedObject + name: 'tcpSocket' + description: | + TCPSocketAction describes an action based on opening a socket + properties: + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. + - !ruby/object:Api::Type::NestedObject + name: 'grpc' + description: | + GRPCAction describes an action involving a GRPC port. + properties: + - !ruby/object:Api::Type::Integer + name: 'port' + description: | + Optional. Port number of the gRPC service. Number must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. + - !ruby/object:Api::Type::String + name: 'service' + description: | + Optional. Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md ). If this is not specified, the default behavior is defined by gRPC. 
+ - !ruby/object:Api::Type::Array + name: 'dependsOn' + description: | + Names of the containers that must start before this container. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'volumes' + description: | + Optional. A list of Volumes to make available to containers. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Volume's name. + - !ruby/object:Api::Type::NestedObject + name: 'secret' + description: | + The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret. + properties: + - !ruby/object:Api::Type::String + name: 'secret' + description: | + Required. The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. + - !ruby/object:Api::Type::Array + name: 'items' + description: | + If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'path' + description: | + Required. The relative path of the secret in the container. + - !ruby/object:Api::Type::String + name: 'version' + description: | + The Cloud Secret Manager secret version. Can be 'latest' for the latest value, or an integer or a secret alias for a specific version. + - !ruby/object:Api::Type::Integer + name: 'mode' + description: | + Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. 
Notes * Internally, a umask of 0222 will be applied to any non-zero value. * This is an integer representation of the mode bits. So, the octal integer value should look exactly as the chmod numeric notation with a leading zero. Some examples: for chmod 777 (a=rwx), set to 0777 (octal) or 511 (base-10). For chmod 640 (u=rw,g=r), set to 0640 (octal) or 416 (base-10). For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal) or 493 (base-10). * This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + - !ruby/object:Api::Type::Integer + name: 'defaultMode' + description: | + Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. Notes * Internally, a umask of 0222 will be applied to any non-zero value. * This is an integer representation of the mode bits. So, the octal integer value should look exactly as the chmod numeric notation with a leading zero. Some examples: for chmod 777 (a=rwx), set to 0777 (octal) or 511 (base-10). For chmod 640 (u=rw,g=r), set to 0640 (octal) or 416 (base-10). For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal) or 493 (base-10). * This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. This might be in conflict with other options that affect the file mode, like fsGroup, and as a result, other mode bits could be set. + - !ruby/object:Api::Type::NestedObject + name: 'cloudSqlInstance' + description: | + Represents a set of Cloud SQL instances. Each one will be available under /cloudsql/[instance]. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'instances' + description: | + The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'emptyDir' + description: | + In memory (tmpfs) ephemeral storage. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). + properties: + - !ruby/object:Api::Type::Enum + name: 'medium' + description: | + The medium on which the data is stored. Acceptable values today are only MEMORY or none. When none, the default will currently be backed by memory but could change over time. + values: + - :MEDIUM_UNSPECIFIED + - :MEMORY + - !ruby/object:Api::Type::String + name: 'sizeLimit' + description: | + Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers. The default is nil which means that the limit is undefined. More info: https://cloud.google.com/run/docs/configuring/in-memory-volumes#configure-volume. Info in Kubernetes: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir + - !ruby/object:Api::Type::NestedObject + name: 'nfs' + description: | + Represents an NFS mount. + properties: + - !ruby/object:Api::Type::String + name: 'server' + description: | + Hostname or IP address of the NFS server + - !ruby/object:Api::Type::String + name: 'path' + description: | + Path that is exported by the NFS server.
+ - !ruby/object:Api::Type::Boolean + name: 'readOnly' + description: | + If true, the volume will be mounted as read only for all mounts. + - !ruby/object:Api::Type::NestedObject + name: 'gcs' + description: | + Represents a volume backed by a Cloud Storage bucket using Cloud Storage FUSE. + properties: + - !ruby/object:Api::Type::String + name: 'bucket' + description: | + Cloud Storage Bucket name. + - !ruby/object:Api::Type::Boolean + name: 'readOnly' + description: | + If true, the volume will be mounted as read only for all mounts. + - !ruby/object:Api::Type::Enum + name: 'executionEnvironment' + description: | + Optional. The sandbox environment to host this Revision. + values: + - :EXECUTION_ENVIRONMENT_UNSPECIFIED + - :EXECUTION_ENVIRONMENT_GEN1 + - :EXECUTION_ENVIRONMENT_GEN2 + - !ruby/object:Api::Type::String + name: 'encryptionKey' + description: | + A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek + - !ruby/object:Api::Type::Integer + name: 'maxInstanceRequestConcurrency' + description: | + Optional. Sets the maximum number of requests that each serving instance can receive. If not specified or 0, defaults to 80 when requested CPU >= 1 and defaults to 1 when requested CPU < 1. + - !ruby/object:Api::Type::Boolean + name: 'sessionAffinity' + description: | + Optional. Enable session affinity. + - !ruby/object:Api::Type::Boolean + name: 'healthCheckDisabled' + description: | + Optional. Disables health checking containers during deployment. + - !ruby/object:Api::Type::NestedObject + name: 'nodeSelector' + description: | + Hardware constraints configuration. + properties: + - !ruby/object:Api::Type::String + name: 'accelerator' + description: | + Required. GPU accelerator type to attach to an instance. + - !ruby/object:Api::Type::Array + name: 'traffic' + description: | + Optional. 
Specifies how to distribute traffic over a collection of Revisions belonging to the Service. If traffic is empty or not provided, defaults to 100% traffic to the latest `Ready` Revision. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The allocation type for this traffic target. + values: + - :TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED + - :TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST + - :TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION + - !ruby/object:Api::Type::String + name: 'revision' + description: | + Revision to which to send this portion of traffic, if traffic allocation is by revision. + - !ruby/object:Api::Type::Integer + name: 'percent' + description: | + Specifies percent of the traffic to this Revision. This defaults to zero if unspecified. + - !ruby/object:Api::Type::String + name: 'tag' + description: | + Indicates a string to be part of the URI to exclusively reference this target. + - !ruby/object:Api::Type::NestedObject + name: 'scaling' + description: | + Scaling settings applied at the service level rather than at the revision level. + properties: + - !ruby/object:Api::Type::Integer + name: 'minInstanceCount' + description: | + Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. (BETA) + - !ruby/object:Api::Type::Boolean + name: 'defaultUriDisabled' + description: | + Optional. Disables public resolution of the default URI of this service. + - !ruby/object:Api::Type::Array + name: 'customAudiences' + description: | + One or more custom audiences that you want this service to support. Specify each custom audience as the full URL in a string. The custom audiences are encoded in the token and used to authenticate requests. For more information, see https://cloud.google.com/run/docs/configuring/custom-audiences. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'observedGeneration' + description: | + Output only. The generation of this Service currently serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. + - !ruby/object:Api::Type::NestedObject + name: 'terminalCondition' + description: | + Defines a status condition for a resource. + properties: + - !ruby/object:Api::Type::String + name: 'type' + description: | + type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + State of the condition. + values: + - :STATE_UNSPECIFIED + - :CONDITION_PENDING + - :CONDITION_RECONCILING + - :CONDITION_FAILED + - :CONDITION_SUCCEEDED + - !ruby/object:Api::Type::String + name: 'message' + description: | + Human readable message indicating details about the current status. + - !ruby/object:Api::Type::String + name: 'lastTransitionTime' + description: | + Last time the condition transitioned from one status to another. + - !ruby/object:Api::Type::Enum + name: 'severity' + description: | + How to interpret failures of this condition, one of Error, Warning, Info + values: + - :SEVERITY_UNSPECIFIED + - :ERROR + - :WARNING + - :INFO + - !ruby/object:Api::Type::Enum + name: 'reason' + description: | + Output only. A common (service-level) reason for this condition. 
+ values: + - :COMMON_REASON_UNDEFINED + - :UNKNOWN + - :REVISION_FAILED + - :PROGRESS_DEADLINE_EXCEEDED + - :CONTAINER_MISSING + - :CONTAINER_PERMISSION_DENIED + - :CONTAINER_IMAGE_UNAUTHORIZED + - :CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED + - :ENCRYPTION_KEY_PERMISSION_DENIED + - :ENCRYPTION_KEY_CHECK_FAILED + - :SECRETS_ACCESS_CHECK_FAILED + - :WAITING_FOR_OPERATION + - :IMMEDIATE_RETRY + - :POSTPONED_RETRY + - :INTERNAL + - !ruby/object:Api::Type::Enum + name: 'revisionReason' + description: | + Output only. A reason for the revision condition. + values: + - :REVISION_REASON_UNDEFINED + - :PENDING + - :RESERVE + - :RETIRED + - :RETIRING + - :RECREATING + - :HEALTH_CHECK_CONTAINER_ERROR + - :CUSTOMIZED_PATH_RESPONSE_PENDING + - :MIN_INSTANCES_NOT_PROVISIONED + - :ACTIVE_REVISION_LIMIT_REACHED + - :NO_DEPLOYMENT + - :HEALTH_CHECK_SKIPPED + - :MIN_INSTANCES_WARMING + - !ruby/object:Api::Type::Enum + name: 'executionReason' + description: | + Output only. A reason for the execution condition. + values: + - :EXECUTION_REASON_UNDEFINED + - :JOB_STATUS_SERVICE_POLLING_ERROR + - :NON_ZERO_EXIT_CODE + - :CANCELLED + - :CANCELLING + - :DELETED + - !ruby/object:Api::Type::Array + name: 'conditions' + description: | + Output only. The Conditions of all other associated sub-resources. They contain additional diagnostics information in case the Service does not reach its Serving state. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'type' + description: | + type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + State of the condition. 
+ values: + - :STATE_UNSPECIFIED + - :CONDITION_PENDING + - :CONDITION_RECONCILING + - :CONDITION_FAILED + - :CONDITION_SUCCEEDED + - !ruby/object:Api::Type::String + name: 'message' + description: | + Human readable message indicating details about the current status. + - !ruby/object:Api::Type::String + name: 'lastTransitionTime' + description: | + Last time the condition transitioned from one status to another. + - !ruby/object:Api::Type::Enum + name: 'severity' + description: | + How to interpret failures of this condition, one of Error, Warning, Info + values: + - :SEVERITY_UNSPECIFIED + - :ERROR + - :WARNING + - :INFO + - !ruby/object:Api::Type::Enum + name: 'reason' + description: | + Output only. A common (service-level) reason for this condition. + values: + - :COMMON_REASON_UNDEFINED + - :UNKNOWN + - :REVISION_FAILED + - :PROGRESS_DEADLINE_EXCEEDED + - :CONTAINER_MISSING + - :CONTAINER_PERMISSION_DENIED + - :CONTAINER_IMAGE_UNAUTHORIZED + - :CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED + - :ENCRYPTION_KEY_PERMISSION_DENIED + - :ENCRYPTION_KEY_CHECK_FAILED + - :SECRETS_ACCESS_CHECK_FAILED + - :WAITING_FOR_OPERATION + - :IMMEDIATE_RETRY + - :POSTPONED_RETRY + - :INTERNAL + - !ruby/object:Api::Type::Enum + name: 'revisionReason' + description: | + Output only. A reason for the revision condition. + values: + - :REVISION_REASON_UNDEFINED + - :PENDING + - :RESERVE + - :RETIRED + - :RETIRING + - :RECREATING + - :HEALTH_CHECK_CONTAINER_ERROR + - :CUSTOMIZED_PATH_RESPONSE_PENDING + - :MIN_INSTANCES_NOT_PROVISIONED + - :ACTIVE_REVISION_LIMIT_REACHED + - :NO_DEPLOYMENT + - :HEALTH_CHECK_SKIPPED + - :MIN_INSTANCES_WARMING + - !ruby/object:Api::Type::Enum + name: 'executionReason' + description: | + Output only. A reason for the execution condition. 
+ values: + - :EXECUTION_REASON_UNDEFINED + - :JOB_STATUS_SERVICE_POLLING_ERROR + - :NON_ZERO_EXIT_CODE + - :CANCELLED + - :CANCELLING + - :DELETED + - !ruby/object:Api::Type::String + name: 'latestReadyRevision' + description: | + Output only. Name of the latest revision that is serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. + - !ruby/object:Api::Type::String + name: 'latestCreatedRevision' + description: | + Output only. Name of the last created revision. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. + - !ruby/object:Api::Type::Array + name: 'trafficStatuses' + description: | + Output only. Detailed status information for corresponding traffic targets. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + The allocation type for this traffic target. + values: + - :TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED + - :TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST + - :TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION + - !ruby/object:Api::Type::String + name: 'revision' + description: | + Revision to which this traffic is sent. + - !ruby/object:Api::Type::Integer + name: 'percent' + description: | + Specifies percent of the traffic to this Revision. + - !ruby/object:Api::Type::String + name: 'tag' + description: | + Indicates the string used in the URI to exclusively reference this target. + - !ruby/object:Api::Type::String + name: 'uri' + description: | + Displays the target URI. + - !ruby/object:Api::Type::String + name: 'uri' + description: | + Output only. The main URI in which this Service is serving traffic. + - !ruby/object:Api::Type::Boolean + name: 'satisfiesPzs' + description: | + Output only. Reserved for future use. 
+ - !ruby/object:Api::Type::Boolean + name: 'reconciling' + description: | + Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revision`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Output only. A system-generated fingerprint for this version of the resource. May be used to detect modification conflict during updates. + diff --git a/mmv1/products/run/inspec.yaml b/mmv1/products/run/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/run/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/secretmanager/inspec.yaml b/mmv1/products/secretmanager/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/secretmanager/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/servicenetworking/api.yaml b/mmv1/products/servicenetworking/api.yaml new file mode 100644 index 000000000..2c24e79f2 --- /dev/null +++ b/mmv1/products/servicenetworking/api.yaml @@ -0,0 +1,55 @@ + +--- !ruby/object:Api::Product +name: ServiceNetworking +display_name: ServiceNetworking +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://servicenetworking.googleapis.com/v1/ +scopes: + - https://www.googleapis.com/auth/cloud-platform +apis_required: + - !ruby/object:Api::Product::ApiReference + name: https://servicenetworking.googleapis.com/ + url: https://console.cloud.google.com/apis/library/servicenetworking.googleapis.com/ +objects: + + - !ruby/object:Api::Resource + name: 'service_connection' + kind: 'compute#services_connections' + base_url: '{{parent}}/connections' + collection_url_key: 'items' + input: true + has_self_link: true + description: | + A private service access connection between a service consumer's VPC network and a service producer's network. + properties: + - !ruby/object:Api::Type::String + name: 'network' + description: | + The name of service consumer's VPC network + that's connected with service producer network + - !ruby/object:Api::Type::Array + name: 'reservedPeeringRanges' + description: | + The name of one or more allocated IP address ranges for this + service producer of type PEERING. Note that invoking connections. + create method with a different range when connection is already + established will not modify already provisioned service producer subnetworks. + If connections.create method is invoked repeatedly to reconnect when peering + connection had been disconnected on the consumer side, leaving this field + empty will restore previously allocated IP ranges. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'peering' + description: | + The name of the VPC Network Peering connection + that was created by the service producer. + output: true + + - !ruby/object:Api::Type::String + name: 'service' + description: | + The name of the peering service that's associated + with this connection, in the following format: services/{service name}. + output: true diff --git a/mmv1/products/servicenetworking/inspec.yaml b/mmv1/products/servicenetworking/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/servicenetworking/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/sql/api.yaml b/mmv1/products/sql/api.yaml index f188681fa..94b636be4 100644 --- a/mmv1/products/sql/api.yaml +++ b/mmv1/products/sql/api.yaml @@ -17,7 +17,7 @@ display_name: Cloud SQL versions: - !ruby/object:Api::Product::Version name: ga - base_url: https://sqladmin.googleapis.com/sql/v1beta4/ + base_url: https://sqladmin.googleapis.com/sql/v1/ scopes: - https://www.googleapis.com/auth/sqlservice.admin apis_required: @@ -68,6 +68,10 @@ objects: - :FIRST_GEN - :SECOND_GEN - :EXTERNAL + - !ruby/object:Api::Type::String + name: 'kind' + description: | + This is always sql#instance. 
 - !ruby/object:Api::Type::String name: 'connectionName' description: | @@ -287,6 +291,10 @@ objects: name: 'settings' description: 'The user settings.' properties: + - !ruby/object:Api::Type::String + name: 'kind' + description: | + This is always sql#settings. - !ruby/object:Api::Type::Array name: 'databaseFlags' description: The database flags passed to the instance at startup @@ -399,6 +407,57 @@ objects: name: 'userLabels' description: | User-provided labels, represented as a dictionary where each label is a single key value pair. + - !ruby/object:Api::Type::Enum + name: 'activationPolicy' + description: | + Specifies when the instance is activated. + required: true + values: + - :SQL_ACTIVATION_POLICY_UNSPECIFIED + - :ALWAYS + - :NEVER + - !ruby/object:Api::Type::String + name: 'dataDiskSizeGb' + output: true + description: | + The size of data disk, in GB. The data disk size minimum is 10GB. + - !ruby/object:Api::Type::Enum + name: 'dataDiskType' + description: | + The type of data disk: PD_SSD (default) or PD_HDD. + required: true + values: + - :SQL_DATA_DISK_TYPE_UNSPECIFIED + - :PD_SSD + - :PD_HDD + - !ruby/object:Api::Type::Enum + name: 'pricingPlan' + description: | + The pricing plan for this instance. + required: true + values: + - :SQL_PRICING_PLAN_UNSPECIFIED + - :PACKAGE + - :PER_USE + - !ruby/object:Api::Type::Enum + name: 'replicationType' + description: | + The type of replication this instance uses. + required: true + values: + - :SQL_REPLICATION_TYPE_UNSPECIFIED + - :SYNCHRONOUS + - :ASYNCHRONOUS + - !ruby/object:Api::Type::Boolean + name: 'storageAutoResize' + description: | + Configuration to increase storage size automatically. The default value is true. + output: true + - !ruby/object:Api::Type::String + name: 'storageAutoResizeLimit' + description: | + The maximum size to which storage capacity can be automatically increased. + The default value is 0, which specifies that there is no limit. 
 - !ruby/object:Api::Type::String name: 'gceZone' output: true diff --git a/mmv1/products/sql/inspec.yaml b/mmv1/products/sql/inspec.yaml index 818ebc71d..e16dc9b51 100644 --- a/mmv1/products/sql/inspec.yaml +++ b/mmv1/products/sql/inspec.yaml @@ -15,9 +15,9 @@ overrides: !ruby/object:Overrides::ResourceOverrides Instance: !ruby/object:Overrides::Inspec::ResourceOverride name: DatabaseInstance - self_link: projects/{{project}}/instances/{{database}} - product_url: https://www.googleapis.com/sql/v1beta4/ - additional_functions: third_party/inspec/custom_functions/google_sql_database.erb + self_link: projects/{{project}}/instances/{{instance}} + product_url: https://sqladmin.googleapis.com/v1/ + additional_functions: third_party/inspec/custom_functions/google_sql_database_instance.erb singular_extra_examples: third_party/inspec/documentation/google_sql_database_instance.md plural_extra_examples: third_party/inspec/documentation/google_sql_database_instances.md properties: @@ -32,9 +32,9 @@ overrides: !ruby/object:Overrides::ResourceOverrides gceZone: !ruby/object:Overrides::Inspec::PropertyOverride override_name: instance_zone User: !ruby/object:Overrides::Inspec::ResourceOverride - base_url: projects/{{project}}/instances/{{database}}/users - self_link: projects/{{project}}/instances/{{database}}/users - product_url: https://www.googleapis.com/sql/v1beta4/ + base_url: projects/{{project}}/instances/{{instance}}/users + self_link: projects/{{project}}/instances/{{instance}}/users + product_url: https://sqladmin.googleapis.com/v1/ plural_extra_examples: third_party/inspec/documentation/google_sql_users.md properties: instance: !ruby/object:Overrides::Inspec::PropertyOverride diff --git a/mmv1/products/vertexai/inspec.yaml b/mmv1/products/vertexai/inspec.yaml new file mode 100644 index 000000000..28cb365e9 --- /dev/null +++ b/mmv1/products/vertexai/inspec.yaml @@ -0,0 +1,15 @@ +# Copyright 2017 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides diff --git a/mmv1/products/vertexai/api.yaml b/mmv1/products/vertexai/api.yaml index ac6e40c8b..5d175e1e6 100644 --- a/mmv1/products/vertexai/api.yaml +++ b/mmv1/products/vertexai/api.yaml @@ -25,84 +25,6 @@ scopes: - https://www.googleapis.com/auth/cloud-platform objects: # Vertex AI Datasets - - !ruby/object:Api::Resource - name: Dataset - base_url: projects/{{project}}/locations/{{region}}/datasets - self_link: '{{name}}' - update_verb: :PATCH - update_mask: true - references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Official Documentation': - 'https://cloud.google.com/vertex-ai/docs' - api: 'https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.datasets' - async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - path: 'name' - base_url: '{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'response' - resource_inside_response: true - status: !ruby/object:Api::OpAsync::Status - path: 'done' - complete: True - allowed: - - True - - False - error: !ruby/object:Api::OpAsync::Error - path: 'error' - message: 'message' - description: |- - A collection of DataItems and Annotations on them. - parameters: - - !ruby/object:Api::Type::String - name: region - description: The region of the dataset. 
eg us-central1 - url_param_only: true - input: true - properties: - - !ruby/object:Api::Type::String - name: 'name' - description: The resource name of the Dataset. This value is set by Google. - output: true - - !ruby/object:Api::Type::String - name: 'displayName' - required: true - description: | - The user-defined name of the Dataset. The name can be up to 128 characters long and can be consist of any UTF-8 characters. - - !ruby/object:Api::Type::String - name: 'createTime' - output: true - description: | - The timestamp of when the dataset was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. - - !ruby/object:Api::Type::String - name: 'updateTime' - output: true - description: | - The timestamp of when the dataset was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. - - !ruby/object:Api::Type::KeyValuePairs - name: 'labels' - description: | - A set of key/value label pairs to assign to this Workflow. - - !ruby/object:Api::Type::NestedObject - name: 'encryptionSpec' - input: true - description: | - Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. - properties: - - !ruby/object:Api::Type::String - name: 'kmsKeyName' - description: | - Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. - Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created. - input: true - - !ruby/object:Api::Type::String - name: 'metadataSchemaUri' - required: true - input: true - description: | - Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. 
The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/. # Vertex AI Featurestores - !ruby/object:Api::Resource name: Featurestore @@ -349,3 +271,9988 @@ objects: The disk utilization of the MetadataStore in bytes. output: true + + - !ruby/object:Api::Resource + name: Dataset + base_url: '{{parent}}/datasets' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A collection of DataItems and Annotations on them. + properties: + + - !ruby/object:Api::Type::Array + name: 'savedQueries' + description: | + All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. The annotation_specs field will not be populated except for UI cases which will only use annotation_spec_count. In CreateDataset request, a SavedQuery is created together if this field is set, up to one SavedQuery can be set in CreateDatasetRequest. The SavedQuery should not contain any AnnotationSpec. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'annotationSpecCount' + description: | + Output only. Number of AnnotationSpecs in the context of the SavedQuery. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when SavedQuery was last updated. + - !ruby/object:Api::Type::Boolean + name: 'supportAutomlTraining' + description: | + Output only. 
If the Annotations belonging to the SavedQuery can be used for AutoML training. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Some additional information about the SavedQuery. + - !ruby/object:Api::Type::String + name: 'problemType' + description: | + Required. Problem type of the SavedQuery. Allowed values: * IMAGE_CLASSIFICATION_SINGLE_LABEL * IMAGE_CLASSIFICATION_MULTI_LABEL * IMAGE_BOUNDING_POLY * IMAGE_BOUNDING_BOX * TEXT_CLASSIFICATION_SINGLE_LABEL * TEXT_CLASSIFICATION_MULTI_LABEL * TEXT_EXTRACTION * TEXT_SENTIMENT * VIDEO_CLASSIFICATION * VIDEO_OBJECT_TRACKING + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the SavedQuery. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this SavedQuery was created. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write update. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of the SavedQuery. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'annotationFilter' + description: | + Output only. Filters on the Annotations in the dataset. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Dataset was created. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. 
The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Dataset. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Required. Additional information about the Dataset. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Dataset. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Datasets. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Dataset: * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its value is the metadata_schema's title. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'metadataSchemaUri' + description: | + Required. Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/. + - !ruby/object:Api::Type::String + name: 'metadataArtifact' + description: | + Output only. 
The resource name of the Artifact that was created in MetadataStore when creating the Dataset. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Dataset was last updated. + - !ruby/object:Api::Type::String + name: 'dataItemCount' + description: | + Output only. The number of DataItems in this Dataset. Only apply for non-structured Dataset. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of the Dataset. The name can be up to 128 characters long and can consist of any UTF-8 characters. + + - !ruby/object:Api::Resource + name: TrainingPipeline + base_url: '{{parent}}/trainingPipelines' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from Vertex AI's Dataset which becomes the training input, upload the Model to Vertex AI, and evaluate the Model. + properties: + + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the TrainingPipeline entered any of the following states: `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`, `PIPELINE_STATE_CANCELLED`. 
+ - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of this TrainingPipeline. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the TrainingPipeline. + - !ruby/object:Api::Type::NestedObject + name: 'inputDataConfig' + description: | + Specifies Vertex AI owned input data to be used for training, and possibly evaluating, the Model. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'fractionSplit' + description: | + Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. 
If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. + properties: + - !ruby/object:Api::Type::Integer + name: 'trainingFraction' + description: | + The fraction of the input data that is to be used to train the Model. + - !ruby/object:Api::Type::Integer + name: 'testFraction' + description: | + The fraction of the input data that is to be used to evaluate the Model. + - !ruby/object:Api::Type::Integer + name: 'validationFraction' + description: | + The fraction of the input data that is to be used to validate the Model. + - !ruby/object:Api::Type::Boolean + name: 'persistMlUseAssignment' + description: | + Whether to persist the ML use assignment to data item system labels. + - !ruby/object:Api::Type::String + name: 'savedQueryId' + description: | + Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. + - !ruby/object:Api::Type::String + name: 'annotationsFilter' + description: | + Applicable only to Datasets that have DataItems and Annotations. A filter on Annotations of the Dataset. Only Annotations that both match this filter and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on (for the auto-assigned that role is decided by Vertex AI). 
A filter with same syntax as the one used in ListAnnotations may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. + - !ruby/object:Api::Type::NestedObject + name: 'gcsDestination' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::NestedObject + name: 'bigqueryDestination' + description: | + The BigQuery location for the output content. + properties: + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. + - !ruby/object:Api::Type::NestedObject + name: 'stratifiedSplit' + description: | + Assigns input data to the training, validation, and test sets so that the distribution of values found in the categorical column (as specified by the `key` field) is mirrored within each split. The fraction values determine the relative sizes of the splits. For example, if the specified column has three values, with 50% of the rows having value "A", 25% value "B", and 25% value "C", and the split fractions are specified as 80/10/10, then the training set will constitute 80% of the training data, with about 50% of the training set rows having the value "A" for the specified column, about 25% having the value "B", and about 25% having the value "C". 
Only the top 500 occurring values are used; any values not in the top 500 values are randomly assigned to a split. If less than three rows contain a specific value, those rows are randomly assigned. Supported only for tabular Datasets. + properties: + - !ruby/object:Api::Type::Integer + name: 'trainingFraction' + description: | + The fraction of the input data that is to be used to train the Model. + - !ruby/object:Api::Type::Integer + name: 'testFraction' + description: | + The fraction of the input data that is to be used to evaluate the Model. + - !ruby/object:Api::Type::String + name: 'key' + description: | + Required. The key is a name of one of the Dataset's data columns. The key provided must be for a categorical column. + - !ruby/object:Api::Type::Integer + name: 'validationFraction' + description: | + The fraction of the input data that is to be used to validate the Model. + - !ruby/object:Api::Type::String + name: 'annotationSchemaUri' + description: | + Applicable only to custom training with Datasets that have DataItems and Annotations. Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. 
+ - !ruby/object:Api::Type::NestedObject + name: 'predefinedSplit' + description: | + Assigns input data to training, validation, and test sets based on the value of a provided key. Supported only for tabular Datasets. + properties: + - !ruby/object:Api::Type::String + name: 'key' + description: | + Required. The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or value in the column) must be one of {`training`, `validation`, `test`}, and it defines to which set the given piece of data is assigned. If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. + - !ruby/object:Api::Type::NestedObject + name: 'filterSplit' + description: | + Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets. + properties: + - !ruby/object:Api::Type::String + name: 'validationFilter' + description: | + Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + - !ruby/object:Api::Type::String + name: 'testFilter' + description: | + Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. 
If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + - !ruby/object:Api::Type::String + name: 'trainingFilter' + description: | + Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + - !ruby/object:Api::Type::NestedObject + name: 'timestampSplit' + description: | + Assigns input data to training, validation, and test sets based on a provided timestamps. The youngest data pieces are assigned to training set, next to validation set, and the oldest to the test set. Supported only for tabular Datasets. + properties: + - !ruby/object:Api::Type::Integer + name: 'testFraction' + description: | + The fraction of the input data that is to be used to evaluate the Model. + - !ruby/object:Api::Type::Integer + name: 'trainingFraction' + description: | + The fraction of the input data that is to be used to train the Model. + - !ruby/object:Api::Type::Integer + name: 'validationFraction' + description: | + The fraction of the input data that is to be used to validate the Model. + - !ruby/object:Api::Type::String + name: 'key' + description: | + Required. The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. + - !ruby/object:Api::Type::String + name: 'datasetId' + description: | + Required. The ID of the Dataset in the same Project and Location which data will be used to train the Model. 
The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's training_task_definition. For tabular Datasets, all their data is exported to training, to pick and choose from. + - !ruby/object:Api::Type::String + name: 'parentModel' + description: | + Optional. When specifying this field, the `model_to_upload` will not be uploaded as a new model, instead, it will become a new version of this `parent_model`. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Time when the TrainingPipeline was most recently updated. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the pipeline. + values: + - :PIPELINE_STATE_UNSPECIFIED + - :PIPELINE_STATE_QUEUED + - :PIPELINE_STATE_PENDING + - :PIPELINE_STATE_RUNNING + - :PIPELINE_STATE_SUCCEEDED + - :PIPELINE_STATE_FAILED + - :PIPELINE_STATE_CANCELLING + - :PIPELINE_STATE_CANCELLED + - :PIPELINE_STATE_PAUSED + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'trainingTaskDefinition' + description: | + Required. A Google Cloud Storage path to the YAML file that defines the training task which is responsible for producing the model artifact, and may also include additional auxiliary work. The definition files that can be used here are found in gs://google-cloud-aiplatform/schema/trainingjob/definition/. 
Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the TrainingPipeline for the first time entered the `PIPELINE_STATE_RUNNING` state. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Time when the TrainingPipeline was created. + - !ruby/object:Api::Type::String + name: 'trainingTaskMetadata' + description: | + Output only. The metadata information as specified in the training_task_definition's `metadata`. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's training_task_definition contains `metadata` object. + - !ruby/object:Api::Type::String + name: 'trainingTaskInputs' + description: | + Required. The training task's parameter(s), as specified in the training_task_definition's `inputs`. + - !ruby/object:Api::Type::String + name: 'modelId' + description: | + Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. 
The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::NestedObject + name: 'modelToUpload' + description: | + A trained machine learning Model. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'modelSourceInfo' + description: | + Detail description of the source information of the model. + properties: + - !ruby/object:Api::Type::Boolean + name: 'copy' + description: | + If this Model is copy of another Model. If true then source_type pertains to the original. + - !ruby/object:Api::Type::Enum + name: 'sourceType' + description: | + Type of the model source. + values: + - :MODEL_SOURCE_TYPE_UNSPECIFIED + - :AUTOML + - :CUSTOM + - :BQML + - :MODEL_GARDEN + - :GENIE + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the Model. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Model was most recently updated. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Model. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'deployedModelId' + description: | + Immutable. An ID of a DeployedModel in the above Endpoint. 
+ - !ruby/object:Api::Type::String + name: 'endpoint' + description: | + Immutable. A resource name of an Endpoint. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Model was uploaded into Vertex AI. + - !ruby/object:Api::Type::NestedObject + name: 'explanationSpec' + description: | + Specification of Model explanation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Parameters to configure explaining for Model's predictions. + properties: + - !ruby/object:Api::Type::Array + name: 'outputIndices' + description: | + If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'examples' + description: | + Example-based explainability that returns the nearest neighbors from the provided dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'presets' + description: | + Preset configuration for example-based explanations + properties: + - !ruby/object:Api::Type::Enum + name: 'modality' + description: | + The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. 
+ values: + - :MODALITY_UNSPECIFIED + - :IMAGE + - :TEXT + - :TABULAR + - !ruby/object:Api::Type::Enum + name: 'query' + description: | + Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. + values: + - :PRECISE + - :FAST + - !ruby/object:Api::Type::Integer + name: 'neighborCount' + description: | + The number of neighbors to return when querying for examples. + - !ruby/object:Api::Type::NestedObject + name: 'exampleGcsSource' + description: | + The Cloud Storage input instances. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'dataFormat' + description: | + The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. + values: + - :DATA_FORMAT_UNSPECIFIED + - :JSONL + - !ruby/object:Api::Type::String + name: 'nearestNeighborSearchConfig' + description: | + The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + - !ruby/object:Api::Type::NestedObject + name: 'xraiAttribution' + description: | + An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. 
Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::Integer + name: 'topK' + description: | + If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs. 
+ - !ruby/object:Api::Type::NestedObject + name: 'integratedGradientsAttribution' + description: | + An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + properties: + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. 
Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::NestedObject + name: 'sampledShapleyAttribution' + description: | + An attribution method that approximates Shapley values for features that contribute to the label being predicted. 
A sampling strategy is used to approximate the value rather than considering all subsets of features. + properties: + - !ruby/object:Api::Type::Integer + name: 'pathCount' + description: | + Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata describing the Model's input and output for explanation. + properties: + - !ruby/object:Api::Type::String + name: 'featureAttributionsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'latentSpaceSource' + description: | + Name of the source to generate embeddings for example based explanations. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the prediction output to be explained. + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. 
An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'pipelineJob' + description: | + Optional. This field is populated if the model is produced by a pipeline job. + - !ruby/object:Api::Type::NestedObject + name: 'predictSchemata' + description: | + Contains the schemata used in Model's predictions and explanations via PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. + properties: + - !ruby/object:Api::Type::String + name: 'instanceSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in PredictRequest.instances, ExplainRequest.instances and BatchPredictionJob.input_config. 
The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'parametersSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via PredictRequest.parameters, ExplainRequest.parameters and BatchPredictionJob.model_parameters. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no parameters are supported, then it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'predictionSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via PredictResponse.predictions, ExplainResponse.explanations, and BatchPredictionJob.output_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+ - !ruby/object:Api::Type::String + name: 'versionUpdateTime' + description: | + Output only. Timestamp when this version was most recently updated. + - !ruby/object:Api::Type::Array + name: 'supportedExportFormats' + description: | + Output only. The formats in which this Model may be exported. If empty, this Model is not available for export. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'exportableContents' + description: | + Output only. The content of this Model that may be exported. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. The ID of the export format. The possible format IDs are: * `tflite` Used for Android mobile devices. * `edgetpu-tflite` Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices. * `tf-saved-model` A tensorflow model in SavedModel format. * `tf-js` A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used in the browser and in Node.js using JavaScript. * `core-ml` Used for iOS mobile devices. * `custom-trained` A Model that was uploaded or trained by custom code. + - !ruby/object:Api::Type::NestedObject + name: 'originalModelInfo' + description: | + Contains information about the original Model if this Model is a copy. + properties: + - !ruby/object:Api::Type::String + name: 'model' + description: | + Output only. The resource name of the Model this Model is a copy of, including the revision. Format: `projects/{project}/locations/{location}/models/{model_id}@{version_id}` + - !ruby/object:Api::Type::String + name: 'metadataArtifact' + description: | + Output only. The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + - !ruby/object:Api::Type::Array + name: 'supportedInputStorageFormats' + description: | + Output only. 
The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'metadataSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+ - !ruby/object:Api::Type::NestedObject + name: 'containerSpec' + description: | + Specification of a container for serving predictions. Some fields in this message correspond to fields in the [Kubernetes Container v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + properties: + - !ruby/object:Api::Type::String + name: 'predictRoute' + description: | + Immutable. HTTP path on the container to send prediction requests to. Vertex AI forwards requests sent using projects.locations.endpoints.predict to this path on the container's IP address and port. Vertex AI then returns the container's response in the API response. For example, if you set this field to `/foo`, then when Vertex AI receives a prediction request, it forwards the request body in a POST request to the `/foo` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Required. Immutable. URI of the Docker image to be used as the custom container for serving predictions. 
This URI must identify an image in Artifact Registry or Container Registry. Learn more about the [container publishing requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), including permissions requirements for the Vertex AI Service Agent. The container image is ingested upon ModelService.UploadModel, stored internally, and this original path is afterwards not used. To learn about the requirements for the Docker image itself, see [Custom container requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). You can use the URI to one of Vertex AI's [pre-built container images for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) in this field. + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Immutable. List of environment variables to set in the container. After the container starts running, code running in the container can read these environment variables. Additionally, the command and args fields can reference these variables. Later entries in this list can also reference earlier entries. For example, the following example sets the variable `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": "foo" }, { "name": "VAR_2", "value": "$(VAR_1) bar" } ] ``` If you switch the order of the variables in the example, then the expansion does not occur. This field corresponds to the `env` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. 
Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Immutable. Specifies arguments for the command that runs when the container starts. This overrides the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify this field as an array of executable and arguments, similar to a Docker `CMD`'s "default parameters" form. If you don't specify this field but do specify the command field, then the command from the `command` field runs without any additional arguments. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). If you don't specify this field and don't specify the `command` field, then the container's [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and `CMD` determine what runs based on their default behavior. See the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. 
In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `args` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'command' + description: | + Immutable. Specifies the command that runs when the container starts. This overrides the container's [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). Specify this field as an array of executable and arguments, similar to a Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. If you do not specify this field, then the container's `ENTRYPOINT` runs, in conjunction with the args field or the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either exists. If this field is not specified and the container does not have an `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). If you specify this field, then you can also specify the `args` field to provide additional arguments for this command. However, if you specify this field, then the container's `CMD` is ignored. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). 
In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `command` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'ports' + description: | + Immutable. List of ports to expose from the container. Vertex AI sends any prediction requests that it receives to the first port on this list. Vertex AI also sends [liveness and health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) to this port. If you do not specify this field, it defaults to following value: ```json [ { "containerPort": 8080 } ] ``` Vertex AI does not use ports other than the first one listed. This field corresponds to the `ports` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'containerPort' + description: | + The number of the port to expose on the pod's IP address. Must be a valid port number, between 1 and 65535 inclusive. + - !ruby/object:Api::Type::String + name: 'healthRoute' + description: | + Immutable. HTTP path on the container to send health checks to. 
Vertex AI intermittently sends GET requests to this path on the container's IP address and port to check that the container is healthy. Read more about [health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). For example, if you set this field to `/bar`, then Vertex AI intermittently sends a GET request to the `/bar` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/ DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + - !ruby/object:Api::Type::String + name: 'versionId' + description: | + Output only. Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + - !ruby/object:Api::Type::String + name: 'artifactUri' + description: | + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + - !ruby/object:Api::Type::String + name: 'trainingPipeline' + description: | + Output only. 
The resource name of the TrainingPipeline that uploaded this Model, if any. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::Array + name: 'supportedDeploymentResourcesTypes' + description: | + Output only. When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'supportedOutputStorageFormats' + description: | + Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . 
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'versionAliases' + description: | + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'versionCreateTime' + description: | + Output only. Timestamp when this version was created. + - !ruby/object:Api::Type::String + name: 'versionDescription' + description: | + The description of this version. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + - !ruby/object:Api::Resource + name: BatchPredictionJob + base_url: '{{parent}}/batchPredictionJobs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A job that uses a Model to produce predictions on multiple input instances. If predictions for significant portion of the instances fail, the job may finish without attempting predictions for all remaining instances. + properties: + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Time when the BatchPredictionJob was created. + - !ruby/object:Api::Type::String + name: 'modelParameters' + description: | + The parameters that govern the predictions. The schema of the parameters may be specified via the Model's PredictSchemata's parameters_schema_uri. + - !ruby/object:Api::Type::NestedObject + name: 'instanceConfig' + description: | + Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. + properties: + - !ruby/object:Api::Type::Array + name: 'includedFields' + description: | + Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. 
The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'instanceType' + description: | + The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. + - !ruby/object:Api::Type::Array + name: 'excludedFields' + description: | + Fields that will be excluded in the prediction instance that is sent to the Model. 
Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'keyField' + description: | + The name of the field that is considered as a key. The values identified by the key field are not included in the transformed instances that are sent to the Model. This is similar to specifying this name of the field in excluded_fields. In addition, the batch prediction output will not include the instances. Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. * For `csv`/`bigquery` output format, the output will have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + - !ruby/object:Api::Type::String + name: 'modelVersionId' + description: | + Output only. The version ID of the Model that produces the predictions via this job. + - !ruby/object:Api::Type::NestedObject + name: 'dedicatedResources' + description: | + A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration. + properties: + - !ruby/object:Api::Type::Integer + name: 'startingReplicaCount' + description: | + Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count + - !ruby/object:Api::Type::Integer + name: 'maxReplicaCount' + description: | + Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10. 
+ - !ruby/object:Api::Type::NestedObject + name: 'machineSpec' + description: | + Specification of a single machine. + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :NVIDIA_TESLA_K80 + - :NVIDIA_TESLA_P100 + - :NVIDIA_TESLA_V100 + - :NVIDIA_TESLA_P4 + - :NVIDIA_TESLA_T4 + - :NVIDIA_TESLA_A100 + - :NVIDIA_A100_80GB + - :NVIDIA_L4 + - :TPU_V2 + - :TPU_V3 + - :TPU_V4_POD + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of accelerators to attach to the machine. + - !ruby/object:Api::Type::NestedObject + name: 'outputInfo' + description: | + Further describes this job's output. Supplements output_config. + properties: + - !ruby/object:Api::Type::String + name: 'bigqueryOutputDataset' + description: | + Output only. The path of the BigQuery dataset created, in `bq://projectId.bqDatasetId` format, into which the prediction output is written. + - !ruby/object:Api::Type::String + name: 'gcsOutputDirectory' + description: | + Output only. The full path of the Cloud Storage directory created, into which the prediction output is written. + - !ruby/object:Api::Type::String + name: 'bigqueryOutputTable' + description: | + Output only. 
The name of the BigQuery table created, in `predictions_<timestamp>` format, into which the prediction output is written. Can be used by UI to generate the BigQuery output path, for example. + - !ruby/object:Api::Type::Boolean + name: 'disableContainerLogging' + description: | + For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + - !ruby/object:Api::Type::NestedObject + name: 'explanationSpec' + description: | + Specification of Model explanation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Parameters to configure explaining for Model's predictions. + properties: + - !ruby/object:Api::Type::Array + name: 'outputIndices' + description: | + If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e.g., multi-class Models that predict multiple classes). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'examples' + description: | + Example-based explainability that returns the nearest neighbors from the provided dataset. 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: 'presets' + description: | + Preset configuration for example-based explanations + properties: + - !ruby/object:Api::Type::Enum + name: 'modality' + description: | + The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. + values: + - :MODALITY_UNSPECIFIED + - :IMAGE + - :TEXT + - :TABULAR + - !ruby/object:Api::Type::Enum + name: 'query' + description: | + Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. + values: + - :PRECISE + - :FAST + - !ruby/object:Api::Type::Integer + name: 'neighborCount' + description: | + The number of neighbors to return when querying for examples. + - !ruby/object:Api::Type::NestedObject + name: 'exampleGcsSource' + description: | + The Cloud Storage input instances. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'dataFormat' + description: | + The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. 
+ values: + - :DATA_FORMAT_UNSPECIFIED + - :JSONL + - !ruby/object:Api::Type::String + name: 'nearestNeighborSearchConfig' + description: | + The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + - !ruby/object:Api::Type::NestedObject + name: 'xraiAttribution' + description: | + An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. 
+ - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. 
Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::Integer + name: 'topK' + description: | + If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs. + - !ruby/object:Api::Type::NestedObject + name: 'integratedGradientsAttribution' + description: | + An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + properties: + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. 
Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. 
+ - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::NestedObject + name: 'sampledShapleyAttribution' + description: | + An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. + properties: + - !ruby/object:Api::Type::Integer + name: 'pathCount' + description: | + Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata describing the Model's input and output for explanation. + properties: + - !ruby/object:Api::Type::String + name: 'featureAttributionsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+ - !ruby/object:Api::Type::String + name: 'latentSpaceSource' + description: | + Name of the source to generate embeddings for example based explanations. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the prediction output to be explained. + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the BatchPredictionJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. + - !ruby/object:Api::Type::Boolean + name: 'generateExplanation' + description: | + Generate explanation with the batch prediction results. 
When set to `true`, the batch prediction output changes based on the `predictions_format` field of the BatchPredictionJob.output_config object: * `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the Explanation object. * `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value of the entry is a JSON object that conforms to the Explanation object. * `csv`: Generating explanations for CSV format is not supported. If this field is set to true, either the Model.explanation_spec or explanation_spec must be populated. + - !ruby/object:Api::Type::NestedObject + name: 'resourcesConsumed' + description: | + Statistics information about resource consumption. + properties: + - !ruby/object:Api::Type::Integer + name: 'replicaHours' + description: | + Output only. The number of replica hours used. Note that many replicas may run in parallel, and additionally any given work may be queued for some time. Therefore this value is not strictly related to wall time. + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. 
+ - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'inputConfig' + description: | + Configures the input to BatchPredictionJob. See Model.supported_input_storage_formats for Model's supported input formats, and how instances should be expressed via any of them. + properties: + - !ruby/object:Api::Type::String + name: 'instancesFormat' + description: | + Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. + - !ruby/object:Api::Type::NestedObject + name: 'bigquerySource' + description: | + The BigQuery location for the input content. + properties: + - !ruby/object:Api::Type::String + name: 'inputUri' + description: | + Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'unmanagedContainerModel' + description: | + Contains model information necessary to perform batch prediction without requiring a full model import. + properties: + - !ruby/object:Api::Type::String + name: 'artifactUri' + description: | + The path to the directory containing the Model artifact and any of its supporting files. 
+ - !ruby/object:Api::Type::NestedObject + name: 'predictSchemata' + description: | + Contains the schemata used in Model's predictions and explanations via PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. + properties: + - !ruby/object:Api::Type::String + name: 'instanceSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in PredictRequest.instances, ExplainRequest.instances and BatchPredictionJob.input_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'parametersSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via PredictRequest.parameters, ExplainRequest.parameters and BatchPredictionJob.model_parameters. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no parameters are supported, then it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'predictionSchemaUri' + description: | + Immutable. 
Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via PredictResponse.predictions, ExplainResponse.explanations, and BatchPredictionJob.output_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::NestedObject + name: 'containerSpec' + description: | + Specification of a container for serving predictions. Some fields in this message correspond to fields in the [Kubernetes Container v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + properties: + - !ruby/object:Api::Type::String + name: 'predictRoute' + description: | + Immutable. HTTP path on the container to send prediction requests to. Vertex AI forwards requests sent using projects.locations.endpoints.predict to this path on the container's IP address and port. Vertex AI then returns the container's response in the API response. For example, if you set this field to `/foo`, then when Vertex AI receives a prediction request, it forwards the request body in a POST request to the `/foo` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. 
(Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Required. Immutable. URI of the Docker image to be used as the custom container for serving predictions. This URI must identify an image in Artifact Registry or Container Registry. Learn more about the [container publishing requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), including permissions requirements for the Vertex AI Service Agent. The container image is ingested upon ModelService.UploadModel, stored internally, and this original path is afterwards not used. To learn about the requirements for the Docker image itself, see [Custom container requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). You can use the URI to one of Vertex AI's [pre-built container images for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) in this field. + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Immutable. List of environment variables to set in the container. After the container starts running, code running in the container can read these environment variables. Additionally, the command and args fields can reference these variables. Later entries in this list can also reference earlier entries. 
For example, the following example sets the variable `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": "foo" }, { "name": "VAR_2", "value": "$(VAR_1) bar" } ] ``` If you switch the order of the variables in the example, then the expansion does not occur. This field corresponds to the `env` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Immutable. Specifies arguments for the command that runs when the container starts. This overrides the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify this field as an array of executable and arguments, similar to a Docker `CMD`'s "default parameters" form. If you don't specify this field but do specify the command field, then the command from the `command` field runs without any additional arguments. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). 
If you don't specify this field and don't specify the `command` field, then the container's [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and `CMD` determine what runs based on their default behavior. See the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `args` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'command' + description: | + Immutable. Specifies the command that runs when the container starts. This overrides the container's [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). Specify this field as an array of executable and arguments, similar to a Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. If you do not specify this field, then the container's `ENTRYPOINT` runs, in conjunction with the args field or the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either exists. 
If this field is not specified and the container does not have an `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). If you specify this field, then you can also specify the `args` field to provide additional arguments for this command. However, if you specify this field, then the container's `CMD` is ignored. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `command` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'ports' + description: | + Immutable. List of ports to expose from the container. Vertex AI sends any prediction requests that it receives to the first port on this list. Vertex AI also sends [liveness and health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) to this port. 
If you do not specify this field, it defaults to following value: ```json [ { "containerPort": 8080 } ] ``` Vertex AI does not use ports other than the first one listed. This field corresponds to the `ports` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'containerPort' + description: | + The number of the port to expose on the pod's IP address. Must be a valid port number, between 1 and 65535 inclusive. + - !ruby/object:Api::Type::String + name: 'healthRoute' + description: | + Immutable. HTTP path on the container to send health checks to. Vertex AI intermittently sends GET requests to this path on the container's IP address and port to check that the container is healthy. Read more about [health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). For example, if you set this field to `/bar`, then Vertex AI intermittently sends a GET request to the `/bar` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/ DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. 
(Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + - !ruby/object:Api::Type::NestedObject + name: 'completionStats' + description: | + Success and error statistics of processing multiple entities (for example, DataItems or structured data rows) in batch. + properties: + - !ruby/object:Api::Type::String + name: 'successfulForecastPointCount' + description: | + Output only. The number of the successful forecast points that are generated by the forecasting model. This is ONLY used by the forecasting batch prediction. + - !ruby/object:Api::Type::String + name: 'incompleteCount' + description: | + Output only. In cases when enough errors are encountered a job, pipeline, or operation may be failed as a whole. Below is the number of entities for which the processing had not been finished (either in successful or failed state). Set to -1 if the number is unknown (for example, the operation failed before the total entity number could be collected). + - !ruby/object:Api::Type::String + name: 'failedCount' + description: | + Output only. The number of entities for which any error was encountered. + - !ruby/object:Api::Type::String + name: 'successfulCount' + description: | + Output only. The number of entities that had been processed successfully. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the BatchPredictionJob for the first time entered the `JOB_STATE_RUNNING` state. + - !ruby/object:Api::Type::NestedObject + name: 'manualBatchTuningParameters' + description: | + Manual batch tuning parameters. + properties: + - !ruby/object:Api::Type::Integer + name: 'batchSize' + description: | + Immutable. The number of the records (e.g. instances) of the operation given in each batch to a machine replica. 
Machine type, and size of a single record should be considered when setting this parameter, higher value speeds up the batch operation's execution, but too high value will result in a whole batch not fitting in a machine's memory, and the whole operation will fail. The default value is 64. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Time when the BatchPredictionJob was most recently updated. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the BatchPredictionJob. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the job. + values: + - :JOB_STATE_UNSPECIFIED + - :JOB_STATE_QUEUED + - :JOB_STATE_PENDING + - :JOB_STATE_RUNNING + - :JOB_STATE_SUCCEEDED + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLING + - :JOB_STATE_CANCELLED + - :JOB_STATE_PAUSED + - :JOB_STATE_EXPIRED + - :JOB_STATE_UPDATING + - :JOB_STATE_PARTIALLY_SUCCEEDED + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. 
The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::Array + name: 'partialFailures' + description: | + Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard Google Cloud error details. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'model' + description: | + The name of the Model resource that produces the predictions via this job, must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. The model resource name may contain version id or version alias to specify the version. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` if no version is specified, the default version will be deployed. The model resource could also be a publisher model. Example: `publishers/{publisher}/models/{model}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` + - !ruby/object:Api::Type::NestedObject + name: 'outputConfig' + description: | + Configures the output of BatchPredictionJob. 
See Model.supported_output_storage_formats for supported output formats, and how predictions are expressed via any of them. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsDestination' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::String + name: 'predictionsFormat' + description: | + Required. The format in which Vertex AI gives the predictions, must be one of the Model's supported_output_storage_formats. + - !ruby/object:Api::Type::NestedObject + name: 'bigqueryDestination' + description: | + The BigQuery location for the output content. + properties: + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of this BatchPredictionJob. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + Required. The user-defined name of the Dataset. 
The name can be up to 128 characters long and can consist of any UTF-8 characters. + + - !ruby/object:Api::Resource + name: CustomJob + base_url: '{{parent}}/customJobs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a job that runs custom workloads such as a Docker container or a Python package. A CustomJob can have multiple worker pools and each worker pool can have its own machine and input spec. A CustomJob will be cleaned up once the job enters terminal state (failed or succeeded). + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. 
+ - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'webAccessUris' + description: | + Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if job_spec.enable_web_access is `true`. The keys are names of each node in the training job; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'jobSpec' + description: | + Represents the spec of a CustomJob. + properties: + - !ruby/object:Api::Type::Array + name: 'workerPoolSpecs' + description: | + Required. The spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'containerSpec' + description: | + The spec of a Container. + properties: + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Environment variables to be passed to the container. Maximum limit is 100. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. 
Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + The arguments to be passed when starting the container. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'command' + description: | + The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Required. The URI of a container image in the Container Registry that is to be run on each worker replica. + - !ruby/object:Api::Type::NestedObject + name: 'machineSpec' + description: | + Specification of a single machine. + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :NVIDIA_TESLA_K80 + - :NVIDIA_TESLA_P100 + - :NVIDIA_TESLA_V100 + - :NVIDIA_TESLA_P4 + - :NVIDIA_TESLA_T4 + - :NVIDIA_TESLA_A100 + - :NVIDIA_A100_80GB + - :NVIDIA_L4 + - :TPU_V2 + - :TPU_V3 + - :TPU_V4_POD + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). 
For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of accelerators to attach to the machine. + - !ruby/object:Api::Type::String + name: 'replicaCount' + description: | + Optional. The number of worker replicas to use for this worker pool. + - !ruby/object:Api::Type::Array + name: 'nfsMounts' + description: | + Optional. List of NFS mount spec. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'path' + description: | + Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + - !ruby/object:Api::Type::String + name: 'mountPoint' + description: | + Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + - !ruby/object:Api::Type::String + name: 'server' + description: | + Required. IP address of the NFS server. + - !ruby/object:Api::Type::NestedObject + name: 'pythonPackageSpec' + description: | + The spec of a Python packaged code. + properties: + - !ruby/object:Api::Type::Array + name: 'packageUris' + description: | + Required. The Google Cloud Storage location of the Python package files which are the training program and its dependent packages. The maximum number of package URIs is 100. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Environment variables to be passed to the python module. Maximum limit is 100. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. 
Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::String + name: 'executorImageUri' + description: | + Required. The URI of a container image in Artifact Registry that will run the provided Python package. Vertex AI provides a wide range of executor images with pre-installed packages to meet users' various use cases. See the list of [pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). You must use an image from this list. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Command line arguments to be passed to the Python task. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'pythonModule' + description: | + Required. The Python module name to run after installing the packages. + - !ruby/object:Api::Type::NestedObject + name: 'diskSpec' + description: | + Represents the spec of disk options. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Size in GB of the boot disk (default is 100GB). + - !ruby/object:Api::Type::Boolean + name: 'enableWebAccess' + description: | + Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. 
If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + - !ruby/object:Api::Type::String + name: 'tensorboard' + description: | + Optional. The name of a Vertex AI Tensorboard resource to which this CustomJob will upload Tensorboard logs. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` + - !ruby/object:Api::Type::String + name: 'experiment' + description: | + Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` + - !ruby/object:Api::Type::String + name: 'experimentRun' + description: | + Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + All parameters related to queuing and scheduling of custom jobs. + properties: + - !ruby/object:Api::Type::Boolean + name: 'disableRetries' + description: | + Optional. Indicates if the job should retry for internal errors after the job starts running. If true, overrides `Scheduling.restart_job_on_worker_restart` to false. + - !ruby/object:Api::Type::String + name: 'timeout' + description: | + The maximum job running time. The default is 7 days. + - !ruby/object:Api::Type::Boolean + name: 'restartJobOnWorkerRestart' + description: | + Restarts the entire CustomJob if a worker gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job. + - !ruby/object:Api::Type::Boolean + name: 'enableDashboardAccess' + description: | + Optional. Whether you want Vertex AI to enable access to the customized dashboard in training chief container. 
If set to `true`, you can access the dashboard at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used. + - !ruby/object:Api::Type::NestedObject + name: 'baseOutputDirectory' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::Array + name: 'reservedIpRanges' + description: | + Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). 
If this field is left unspecified, the job is not peered with any network. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the CustomJob for the first time entered the `JOB_STATE_RUNNING` state. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize CustomJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Time when the CustomJob was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Time when the CustomJob was most recently updated. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the CustomJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the job. 
+ values: + - :JOB_STATE_UNSPECIFIED + - :JOB_STATE_QUEUED + - :JOB_STATE_PENDING + - :JOB_STATE_RUNNING + - :JOB_STATE_SUCCEEDED + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLING + - :JOB_STATE_CANCELLED + - :JOB_STATE_PAUSED + - :JOB_STATE_EXPIRED + - :JOB_STATE_UPDATING + - :JOB_STATE_PARTIALLY_SUCCEEDED + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the CustomJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of a CustomJob. + + + + - !ruby/object:Api::Resource + name: NasJob + base_url: '{{parent}}/nasJobs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a Neural Architecture Search (NAS) job. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'nasJobOutput' + description: | + Represents a uCAIP NasJob output. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'multiTrialJobOutput' + description: | + The output of a multi-trial Neural Architecture Search (NAS) jobs. + properties: + - !ruby/object:Api::Type::Array + name: 'searchTrials' + description: | + Output only. List of NasTrials that were started as part of search stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. 
The identifier of the NasTrial assigned by the service. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the NasTrial. + values: + - :STATE_UNSPECIFIED + - :REQUESTED + - :ACTIVE + - :STOPPING + - :SUCCEEDED + - :INFEASIBLE + - !ruby/object:Api::Type::NestedObject + name: 'finalMeasurement' + description: | + A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the NasTrial was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the NasTrial's status changed to `SUCCEEDED` or `INFEASIBLE`. + - !ruby/object:Api::Type::Array + name: 'trainTrials' + description: | + Output only. List of NasTrials that were started as part of train stage. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. 
The identifier of the NasTrial assigned by the service. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the NasTrial. + values: + - :STATE_UNSPECIFIED + - :REQUESTED + - :ACTIVE + - :STOPPING + - :SUCCEEDED + - :INFEASIBLE + - !ruby/object:Api::Type::NestedObject + name: 'finalMeasurement' + description: | + A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the NasTrial was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the NasTrial's status changed to `SUCCEEDED` or `INFEASIBLE`. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the NasJob. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the NasJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. 
+ - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the job. + values: + - :JOB_STATE_UNSPECIFIED + - :JOB_STATE_QUEUED + - :JOB_STATE_PENDING + - :JOB_STATE_RUNNING + - :JOB_STATE_SUCCEEDED + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLING + - :JOB_STATE_CANCELLED + - :JOB_STATE_PAUSED + - :JOB_STATE_EXPIRED + - :JOB_STATE_UPDATING + - :JOB_STATE_PARTIALLY_SUCCEEDED + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Time when the NasJob was created. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the NasJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::NestedObject + name: 'nasJobSpec' + description: | + Represents the spec of a NasJob. 
+ properties: + - !ruby/object:Api::Type::String + name: 'searchSpaceSpec' + description: | + It defines the search space for Neural Architecture Search (NAS). + - !ruby/object:Api::Type::String + name: 'resumeNasJobId' + description: | + The ID of the existing NasJob in the same Project and Location which will be used to resume search. search_space_spec and nas_algorithm_spec are obtained from previous NasJob hence should not provide them again for this NasJob. + - !ruby/object:Api::Type::NestedObject + name: 'multiTrialAlgorithmSpec' + description: | + The spec of multi-trial Neural Architecture Search (NAS). + properties: + - !ruby/object:Api::Type::NestedObject + name: 'searchTrialSpec' + description: | + Represent spec for search trials. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxFailedTrialCount' + description: | + The number of failed trials that need to be seen before failing the NasJob. If set to 0, Vertex AI decides how many trials must fail before the whole job fails. + - !ruby/object:Api::Type::Integer + name: 'maxParallelTrialCount' + description: | + Required. The maximum number of trials to run in parallel. + - !ruby/object:Api::Type::NestedObject + name: 'searchTrialJobSpec' + description: | + Represents the spec of a CustomJob. + properties: + - !ruby/object:Api::Type::Array + name: 'workerPoolSpecs' + description: | + Required. The spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'containerSpec' + description: | + The spec of a Container. + properties: + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Environment variables to be passed to the container. Maximum limit is 100. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + The arguments to be passed when starting the container. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'command' + description: | + The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Required. The URI of a container image in the Container Registry that is to be run on each worker replica. + - !ruby/object:Api::Type::NestedObject + name: 'machineSpec' + description: | + Specification of a single machine. + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :NVIDIA_TESLA_K80 + - :NVIDIA_TESLA_P100 + - :NVIDIA_TESLA_V100 + - :NVIDIA_TESLA_P4 + - :NVIDIA_TESLA_T4 + - :NVIDIA_TESLA_A100 + - :NVIDIA_A100_80GB + - :NVIDIA_L4 + - :TPU_V2 + - :TPU_V3 + - :TPU_V4_POD + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Immutable. The type of the machine. 
See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of accelerators to attach to the machine. + - !ruby/object:Api::Type::String + name: 'replicaCount' + description: | + Optional. The number of worker replicas to use for this worker pool. + - !ruby/object:Api::Type::Array + name: 'nfsMounts' + description: | + Optional. List of NFS mount spec. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'path' + description: | + Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + - !ruby/object:Api::Type::String + name: 'mountPoint' + description: | + Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + - !ruby/object:Api::Type::String + name: 'server' + description: | + Required. IP address of the NFS server. + - !ruby/object:Api::Type::NestedObject + name: 'pythonPackageSpec' + description: | + The spec of a Python packaged code. + properties: + - !ruby/object:Api::Type::Array + name: 'packageUris' + description: | + Required. The Google Cloud Storage location of the Python package files which are the training program and its dependent packages. The maximum number of package URIs is 100. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Environment variables to be passed to the python module. Maximum limit is 100. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::String + name: 'executorImageUri' + description: | + Required. The URI of a container image in Artifact Registry that will run the provided Python package. Vertex AI provides a wide range of executor images with pre-installed packages to meet users' various use cases. See the list of [pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). You must use an image from this list. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Command line arguments to be passed to the Python task. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'pythonModule' + description: | + Required. The Python module name to run after installing the packages. + - !ruby/object:Api::Type::NestedObject + name: 'diskSpec' + description: | + Represents the spec of disk options. + properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Size in GB of the boot disk (default is 100GB). 
+ - !ruby/object:Api::Type::Boolean + name: 'enableWebAccess' + description: | + Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + - !ruby/object:Api::Type::String + name: 'tensorboard' + description: | + Optional. The name of a Vertex AI Tensorboard resource to which this CustomJob will upload Tensorboard logs. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` + - !ruby/object:Api::Type::String + name: 'experiment' + description: | + Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` + - !ruby/object:Api::Type::String + name: 'experimentRun' + description: | + Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + All parameters related to queuing and scheduling of custom jobs. + properties: + - !ruby/object:Api::Type::Boolean + name: 'disableRetries' + description: | + Optional. Indicates if the job should retry for internal errors after the job starts running. If true, overrides `Scheduling.restart_job_on_worker_restart` to false. + - !ruby/object:Api::Type::String + name: 'timeout' + description: | + The maximum job running time. The default is 7 days. + - !ruby/object:Api::Type::Boolean + name: 'restartJobOnWorkerRestart' + description: | + Restarts the entire CustomJob if a worker gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job. 
+ - !ruby/object:Api::Type::Boolean + name: 'enableDashboardAccess' + description: | + Optional. Whether you want Vertex AI to enable access to the customized dashboard in training chief container. If set to `true`, you can access the dashboard at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used. + - !ruby/object:Api::Type::NestedObject + name: 'baseOutputDirectory' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::Array + name: 'reservedIpRanges' + description: | + Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. 
Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. + - !ruby/object:Api::Type::Integer + name: 'maxTrialCount' + description: | + Required. The maximum number of Neural Architecture Search (NAS) trials to run. + - !ruby/object:Api::Type::Enum + name: 'multiTrialAlgorithm' + description: | + The multi-trial Neural Architecture Search (NAS) algorithm type. Defaults to `REINFORCEMENT_LEARNING`. + values: + - :MULTI_TRIAL_ALGORITHM_UNSPECIFIED + - :REINFORCEMENT_LEARNING + - :GRID_SEARCH + - !ruby/object:Api::Type::NestedObject + name: 'trainTrialSpec' + description: | + Represent spec for train trials. + properties: + - !ruby/object:Api::Type::Integer + name: 'frequency' + description: | + Required. Frequency of search trials to start train stage. Top N [TrainTrialSpec.max_parallel_trial_count] search trials will be trained for every M [TrainTrialSpec.frequency] trials searched. + - !ruby/object:Api::Type::Integer + name: 'maxParallelTrialCount' + description: | + Required. The maximum number of trials to run in parallel. + - !ruby/object:Api::Type::NestedObject + name: 'trainTrialJobSpec' + description: | + Represents the spec of a CustomJob. + properties: + - !ruby/object:Api::Type::Array + name: 'workerPoolSpecs' + description: | + Required. The spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'containerSpec' + description: | + The spec of a Container. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Environment variables to be passed to the container. Maximum limit is 100. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + The arguments to be passed when starting the container. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'command' + description: | + The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Required. The URI of a container image in the Container Registry that is to be run on each worker replica. + - !ruby/object:Api::Type::NestedObject + name: 'machineSpec' + description: | + Specification of a single machine. + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. 
+ values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :NVIDIA_TESLA_K80 + - :NVIDIA_TESLA_P100 + - :NVIDIA_TESLA_V100 + - :NVIDIA_TESLA_P4 + - :NVIDIA_TESLA_T4 + - :NVIDIA_TESLA_A100 + - :NVIDIA_A100_80GB + - :NVIDIA_L4 + - :TPU_V2 + - :TPU_V3 + - :TPU_V4_POD + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of accelerators to attach to the machine. + - !ruby/object:Api::Type::String + name: 'replicaCount' + description: | + Optional. The number of worker replicas to use for this worker pool. + - !ruby/object:Api::Type::Array + name: 'nfsMounts' + description: | + Optional. List of NFS mount spec. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'path' + description: | + Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + - !ruby/object:Api::Type::String + name: 'mountPoint' + description: | + Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + - !ruby/object:Api::Type::String + name: 'server' + description: | + Required. IP address of the NFS server. + - !ruby/object:Api::Type::NestedObject + name: 'pythonPackageSpec' + description: | + The spec of a Python packaged code. + properties: + - !ruby/object:Api::Type::Array + name: 'packageUris' + description: | + Required. 
The Google Cloud Storage location of the Python package files which are the training program and its dependent packages. The maximum number of package URIs is 100. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Environment variables to be passed to the python module. Maximum limit is 100. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::String + name: 'executorImageUri' + description: | + Required. The URI of a container image in Artifact Registry that will run the provided Python package. Vertex AI provides a wide range of executor images with pre-installed packages to meet users' various use cases. See the list of [pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). You must use an image from this list. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Command line arguments to be passed to the Python task. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'pythonModule' + description: | + Required. The Python module name to run after installing the packages. + - !ruby/object:Api::Type::NestedObject + name: 'diskSpec' + description: | + Represents the spec of disk options. 
+ properties: + - !ruby/object:Api::Type::String + name: 'bootDiskType' + description: | + Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + description: | + Size in GB of the boot disk (default is 100GB). + - !ruby/object:Api::Type::Boolean + name: 'enableWebAccess' + description: | + Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + - !ruby/object:Api::Type::String + name: 'tensorboard' + description: | + Optional. The name of a Vertex AI Tensorboard resource to which this CustomJob will upload Tensorboard logs. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` + - !ruby/object:Api::Type::String + name: 'experiment' + description: | + Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` + - !ruby/object:Api::Type::String + name: 'experimentRun' + description: | + Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + - !ruby/object:Api::Type::NestedObject + name: 'scheduling' + description: | + All parameters related to queuing and scheduling of custom jobs. + properties: + - !ruby/object:Api::Type::Boolean + name: 'disableRetries' + description: | + Optional. Indicates if the job should retry for internal errors after the job starts running. If true, overrides `Scheduling.restart_job_on_worker_restart` to false. 
+ - !ruby/object:Api::Type::String + name: 'timeout' + description: | + The maximum job running time. The default is 7 days. + - !ruby/object:Api::Type::Boolean + name: 'restartJobOnWorkerRestart' + description: | + Restarts the entire CustomJob if a worker gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job. + - !ruby/object:Api::Type::Boolean + name: 'enableDashboardAccess' + description: | + Optional. Whether you want Vertex AI to enable access to the customized dashboard in training chief container. If set to `true`, you can access the dashboard at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used. + - !ruby/object:Api::Type::NestedObject + name: 'baseOutputDirectory' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::Array + name: 'reservedIpRanges' + description: | + Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. + - !ruby/object:Api::Type::NestedObject + name: 'metric' + description: | + Represents a metric to optimize. + properties: + - !ruby/object:Api::Type::Enum + name: 'goal' + description: | + Required. The optimization goal of the metric. + values: + - :GOAL_TYPE_UNSPECIFIED + - :MAXIMIZE + - :MINIMIZE + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Required. The ID of the metric. Must not contain whitespaces. + - !ruby/object:Api::Type::Boolean + name: 'enableRestrictedImageTraining' + description: | + Optional. Enable a separation of Custom model training and restricted image training for tenant project. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the NasJob for the first time entered the `JOB_STATE_RUNNING` state. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. 
Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize NasJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Time when the NasJob was most recently updated. + + + + - !ruby/object:Api::Resource + name: Endpoint + base_url: '{{parent}}/endpoints' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. 
The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Endpoint was last updated. + - !ruby/object:Api::Type::String + name: 'modelDeploymentMonitoringJob' + description: | + Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Endpoint. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when the DeployedModel was created. + - !ruby/object:Api::Type::NestedObject + name: 'privateEndpoints' + description: | + PrivateEndpoints proto is used to provide paths for users to send requests privately. To send request via private service access, use predict_http_uri, explain_http_uri or health_http_uri. To send request via private service connect, use service_attachment. 
+ properties: + - !ruby/object:Api::Type::String + name: 'healthHttpUri' + description: | + Output only. Http(s) path to send health check requests. + - !ruby/object:Api::Type::String + name: 'explainHttpUri' + description: | + Output only. Http(s) path to send explain requests. + - !ruby/object:Api::Type::String + name: 'predictHttpUri' + description: | + Output only. Http(s) path to send prediction requests. + - !ruby/object:Api::Type::String + name: 'serviceAttachment' + description: | + Output only. The name of the service attachment resource. Populated if private service connect is enabled. + - !ruby/object:Api::Type::Boolean + name: 'disableContainerLogging' + description: | + For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + - !ruby/object:Api::Type::String + name: 'modelVersionId' + description: | + Output only. The version ID of the model that is deployed. + - !ruby/object:Api::Type::NestedObject + name: 'explanationSpec' + description: | + Specification of Model explanation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Parameters to configure explaining for Model's predictions. + properties: + - !ruby/object:Api::Type::Array + name: 'outputIndices' + description: | + If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. 
Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'examples' + description: | + Example-based explainability that returns the nearest neighbors from the provided dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'presets' + description: | + Preset configuration for example-based explanations + properties: + - !ruby/object:Api::Type::Enum + name: 'modality' + description: | + The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. + values: + - :MODALITY_UNSPECIFIED + - :IMAGE + - :TEXT + - :TABULAR + - !ruby/object:Api::Type::Enum + name: 'query' + description: | + Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. + values: + - :PRECISE + - :FAST + - !ruby/object:Api::Type::Integer + name: 'neighborCount' + description: | + The number of neighbors to return when querying for examples. + - !ruby/object:Api::Type::NestedObject + name: 'exampleGcsSource' + description: | + The Cloud Storage input instances. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'dataFormat' + description: | + The format in which instances are given, if not specified, assume it's JSONL format. 
Currently only JSONL format is supported. + values: + - :DATA_FORMAT_UNSPECIFIED + - :JSONL + - !ruby/object:Api::Type::String + name: 'nearestNeighborSearchConfig' + description: | + The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + - !ruby/object:Api::Type::NestedObject + name: 'xraiAttribution' + description: | + An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. 
+ - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. 
Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::Integer + name: 'topK' + description: | + If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs. + - !ruby/object:Api::Type::NestedObject + name: 'integratedGradientsAttribution' + description: | + An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + properties: + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. 
Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. 
+ - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::NestedObject + name: 'sampledShapleyAttribution' + description: | + An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. + properties: + - !ruby/object:Api::Type::Integer + name: 'pathCount' + description: | + Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata describing the Model's input and output for explanation. + properties: + - !ruby/object:Api::Type::String + name: 'featureAttributionsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+ - !ruby/object:Api::Type::String + name: 'latentSpaceSource' + description: | + Name of the source to generate embeddings for example based explanations. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the prediction output to be explained. + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + - !ruby/object:Api::Type::Boolean + name: 'enableAccessLogging' + description: | + If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. 
Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + - !ruby/object:Api::Type::NestedObject + name: 'dedicatedResources' + description: | + A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'machineSpec' + description: | + Specification of a single machine. + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :NVIDIA_TESLA_K80 + - :NVIDIA_TESLA_P100 + - :NVIDIA_TESLA_V100 + - :NVIDIA_TESLA_P4 + - :NVIDIA_TESLA_T4 + - :NVIDIA_TESLA_A100 + - :NVIDIA_A100_80GB + - :NVIDIA_L4 + - :TPU_V2 + - :TPU_V3 + - :TPU_V4_POD + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. 
+ - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of accelerators to attach to the machine. + - !ruby/object:Api::Type::Integer + name: 'maxReplicaCount' + description: | + Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). + - !ruby/object:Api::Type::Array + name: 'autoscalingMetricSpecs' + description: | + Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. 
For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'target' + description: | + The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. + - !ruby/object:Api::Type::String + name: 'metricName' + description: | + Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` + - !ruby/object:Api::Type::Integer + name: 'minReplicaCount' + description: | + Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. + - !ruby/object:Api::Type::NestedObject + name: 'automaticResources' + description: | + A description of resources that to a large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxReplicaCount' + description: | + Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. 
If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. + - !ruby/object:Api::Type::Integer + name: 'minReplicaCount' + description: | + Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. + - !ruby/object:Api::Type::String + name: 'model' + description: | + Required. The resource name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. The resource name may contain version id or version alias to specify the version. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` if no version is specified, the default version will be deployed. + - !ruby/object:Api::Type::String + name: 'id' + description: | + Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are /[0-9]/. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. 
The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. + - !ruby/object:Api::Type::NestedObject + name: 'trafficSplit' + description: | + A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::NestedObject + name: 'predictRequestResponseLoggingConfig' + description: | + Configuration for logging request-response to a BigQuery table. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'samplingRate' + description: | + Percentage of requests to be logged, expressed as a fraction in range(0,1]. + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + If logging is enabled or not. + - !ruby/object:Api::Type::NestedObject + name: 'bigqueryDestination' + description: | + The BigQuery location for the output content. + properties: + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Endpoint was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Endpoint. 
+ + + + - !ruby/object:Api::Resource + name: Endpoint + base_url: '{{parent}}/endpoints' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Endpoint was last updated. + - !ruby/object:Api::Type::String + name: 'modelDeploymentMonitoringJob' + description: | + Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. 
Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Endpoint. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when the DeployedModel was created. + - !ruby/object:Api::Type::NestedObject + name: 'privateEndpoints' + description: | + PrivateEndpoints proto is used to provide paths for users to send requests privately. To send request via private service access, use predict_http_uri, explain_http_uri or health_http_uri. To send request via private service connect, use service_attachment. + properties: + - !ruby/object:Api::Type::String + name: 'healthHttpUri' + description: | + Output only. Http(s) path to send health check requests. + - !ruby/object:Api::Type::String + name: 'explainHttpUri' + description: | + Output only. Http(s) path to send explain requests. + - !ruby/object:Api::Type::String + name: 'predictHttpUri' + description: | + Output only. Http(s) path to send prediction requests. + - !ruby/object:Api::Type::String + name: 'serviceAttachment' + description: | + Output only. The name of the service attachment resource. Populated if private service connect is enabled. + - !ruby/object:Api::Type::Boolean + name: 'disableContainerLogging' + description: | + For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). 
User can disable container logging by setting this flag to true. + - !ruby/object:Api::Type::String + name: 'modelVersionId' + description: | + Output only. The version ID of the model that is deployed. + - !ruby/object:Api::Type::NestedObject + name: 'explanationSpec' + description: | + Specification of Model explanation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Parameters to configure explaining for Model's predictions. + properties: + - !ruby/object:Api::Type::Array + name: 'outputIndices' + description: | + If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'examples' + description: | + Example-based explainability that returns the nearest neighbors from the provided dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'presets' + description: | + Preset configuration for example-based explanations + properties: + - !ruby/object:Api::Type::Enum + name: 'modality' + description: | + The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. + values: + - :MODALITY_UNSPECIFIED + - :IMAGE + - :TEXT + - :TABULAR + - !ruby/object:Api::Type::Enum + name: 'query' + description: | + Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. 
+ values: + - :PRECISE + - :FAST + - !ruby/object:Api::Type::Integer + name: 'neighborCount' + description: | + The number of neighbors to return when querying for examples. + - !ruby/object:Api::Type::NestedObject + name: 'exampleGcsSource' + description: | + The Cloud Storage input instances. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'dataFormat' + description: | + The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. + values: + - :DATA_FORMAT_UNSPECIFIED + - :JSONL + - !ruby/object:Api::Type::String + name: 'nearestNeighborSearchConfig' + description: | + The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + - !ruby/object:Api::Type::NestedObject + name: 'xraiAttribution' + description: | + An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. 
Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. 
+ - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::Integer + name: 'topK' + description: | + If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs. + - !ruby/object:Api::Type::NestedObject + name: 'integratedGradientsAttribution' + description: | + An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + properties: + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. 
The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. 
Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::NestedObject + name: 'sampledShapleyAttribution' + description: | + An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. + properties: + - !ruby/object:Api::Type::Integer + name: 'pathCount' + description: | + Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. 
+ - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata describing the Model's input and output for explanation. + properties: + - !ruby/object:Api::Type::String + name: 'featureAttributionsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'latentSpaceSource' + description: | + Name of the source to generate embeddings for example based explanations. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the prediction output to be explained. + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. 
Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + - !ruby/object:Api::Type::Boolean + name: 'enableAccessLogging' + description: | + If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + - !ruby/object:Api::Type::NestedObject + name: 'dedicatedResources' + description: | + A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'machineSpec' + description: | + Specification of a single machine. + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. 
+ values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :NVIDIA_TESLA_K80 + - :NVIDIA_TESLA_P100 + - :NVIDIA_TESLA_V100 + - :NVIDIA_TESLA_P4 + - :NVIDIA_TESLA_T4 + - :NVIDIA_TESLA_A100 + - :NVIDIA_A100_80GB + - :NVIDIA_L4 + - :TPU_V2 + - :TPU_V3 + - :TPU_V4_POD + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of accelerators to attach to the machine. + - !ruby/object:Api::Type::Integer + name: 'maxReplicaCount' + description: | + Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). + - !ruby/object:Api::Type::Array + name: 'autoscalingMetricSpecs' + description: | + Immutable. 
The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'target' + description: | + The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. + - !ruby/object:Api::Type::String + name: 'metricName' + description: | + Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` + - !ruby/object:Api::Type::Integer + name: 'minReplicaCount' + description: | + Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. 
+ - !ruby/object:Api::Type::NestedObject + name: 'automaticResources' + description: | + A description of resources that to a large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxReplicaCount' + description: | + Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. + - !ruby/object:Api::Type::Integer + name: 'minReplicaCount' + description: | + Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. + - !ruby/object:Api::Type::String + name: 'model' + description: | + Required. The resource name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. The resource name may contain version id or version alias to specify the version. 
Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` if no version is specified, the default version will be deployed. + - !ruby/object:Api::Type::String + name: 'id' + description: | + Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are /[0-9]/. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. + - !ruby/object:Api::Type::NestedObject + name: 'trafficSplit' + description: | + A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. 
See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::NestedObject + name: 'predictRequestResponseLoggingConfig' + description: | + Configuration for logging request-response to a BigQuery table. + properties: + - !ruby/object:Api::Type::Integer + name: 'samplingRate' + description: | + Percentage of requests to be logged, expressed as a fraction in range(0,1]. + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + If logging is enabled or not. + - !ruby/object:Api::Type::NestedObject + name: 'bigqueryDestination' + description: | + The BigQuery location for the output content. + properties: + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Endpoint was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Endpoint. 
+ + + + - !ruby/object:Api::Resource + name: TensorboardsExperiment + base_url: '{{parent}}/experiments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A TensorboardExperiment is a group of TensorboardRuns, that are typically the results of a training job run, in a Tensorboard. + properties: + + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of this TensorboardExperiment. + - !ruby/object:Api::Type::String + name: 'source' + description: | + Immutable. Source of the TensorboardExperiment. Example: a custom training job. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided name of this TensorboardExperiment. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this TensorboardExperiment was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this TensorboardExperiment was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your TensorboardExperiment. Label keys and values cannot be longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). 
See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with `aiplatform.googleapis.com/` and are immutable. The following system labels exist for each Dataset: * `aiplatform.googleapis.com/dataset_metadata_schema`: output only. Its value is the metadata_schema's title. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the TensorboardExperiment. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + + + + - !ruby/object:Api::Resource + name: TensorboardsExperiment + base_url: '{{parent}}/experiments' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A TensorboardExperiment is a group of TensorboardRuns, that are typically the results of a training job run, in a Tensorboard. + properties: + + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of this TensorboardExperiment. + - !ruby/object:Api::Type::String + name: 'source' + description: | + Immutable. Source of the TensorboardExperiment. Example: a custom training job. 
+ - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided name of this TensorboardExperiment. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this TensorboardExperiment was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this TensorboardExperiment was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your TensorboardExperiment. Label keys and values cannot be longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with `aiplatform.googleapis.com/` and are immutable. The following system labels exist for each Dataset: * `aiplatform.googleapis.com/dataset_metadata_schema`: output only. Its value is the metadata_schema's title. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the TensorboardExperiment. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
+ + + + - !ruby/object:Api::Resource + name: ModelsEvaluation + base_url: '{{parent}}/evaluations' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A collection of metrics calculated by comparing Model's predictions on all of the test data against annotations from the test data. + properties: + + - !ruby/object:Api::Type::String + name: 'dataItemSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing EvaluatedDataItemView.data_item_payload and EvaluatedAnnotation.data_item_payload. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). This field is not populated if there are neither EvaluatedDataItemViews nor EvaluatedAnnotations under this ModelEvaluation. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from Managed Pipeline, metadata contains a structured value with keys of "pipeline_job_id", "evaluation_dataset_type", "evaluation_dataset_path". + - !ruby/object:Api::Type::String + name: 'metricsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). 
+ - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this ModelEvaluation was created. + - !ruby/object:Api::Type::String + name: 'annotationSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing EvaluatedDataItemView.predictions, EvaluatedDataItemView.ground_truths, EvaluatedAnnotation.predictions, and EvaluatedAnnotation.ground_truths. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). This field is not populated if there are neither EvaluatedDataItemViews nor EvaluatedAnnotations under this ModelEvaluation. + - !ruby/object:Api::Type::String + name: 'metrics' + description: | + Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri + - !ruby/object:Api::Type::Array + name: 'explanationSpecs' + description: | + Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'explanationSpec' + description: | + Specification of Model explanation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Parameters to configure explaining for Model's predictions. + properties: + - !ruby/object:Api::Type::Array + name: 'outputIndices' + description: | + If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'examples' + description: | + Example-based explainability that returns the nearest neighbors from the provided dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'presets' + description: | + Preset configuration for example-based explanations + properties: + - !ruby/object:Api::Type::Enum + name: 'modality' + description: | + The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. + values: + - :MODALITY_UNSPECIFIED + - :IMAGE + - :TEXT + - :TABULAR + - !ruby/object:Api::Type::Enum + name: 'query' + description: | + Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. + values: + - :PRECISE + - :FAST + - !ruby/object:Api::Type::Integer + name: 'neighborCount' + description: | + The number of neighbors to return when querying for examples. + - !ruby/object:Api::Type::NestedObject + name: 'exampleGcsSource' + description: | + The Cloud Storage input instances. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'dataFormat' + description: | + The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. 
+ values: + - :DATA_FORMAT_UNSPECIFIED + - :JSONL + - !ruby/object:Api::Type::String + name: 'nearestNeighborSearchConfig' + description: | + The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + - !ruby/object:Api::Type::NestedObject + name: 'xraiAttribution' + description: | + An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. 
+ - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. 
Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::Integer + name: 'topK' + description: | + If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predicts more than one outputs (e,g, multi-class Models). When set to -1, returns explanations for all outputs. + - !ruby/object:Api::Type::NestedObject + name: 'integratedGradientsAttribution' + description: | + An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + properties: + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. 
Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. 
+ - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::NestedObject + name: 'sampledShapleyAttribution' + description: | + An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. + properties: + - !ruby/object:Api::Type::Integer + name: 'pathCount' + description: | + Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata describing the Model's input and output for explanation. + properties: + - !ruby/object:Api::Type::String + name: 'featureAttributionsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+ - !ruby/object:Api::Type::String + name: 'latentSpaceSource' + description: | + Name of the source to generate embeddings for example based explanations. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the prediction output to be explained. + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + - !ruby/object:Api::Type::String + name: 'explanationType' + description: | + Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` + - !ruby/object:Api::Type::Array + name: 'sliceDimensions' + description: | + All possible dimensions of ModelEvaluationSlices. 
The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = `. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'modelExplanation' + description: | + Aggregated explanation metrics for a Model over a set of instances. + properties: + - !ruby/object:Api::Type::Array + name: 'meanAttributions' + description: | + Output only. Aggregated attributions explaining the Model's prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'outputDisplayName' + description: | + Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index. + - !ruby/object:Api::Type::Integer + name: 'baselineOutputValue' + description: | + Output only. 
Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged. + - !ruby/object:Api::Type::String + name: 'outputName' + description: | + Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs. + - !ruby/object:Api::Type::String + name: 'featureAttributions' + description: | + Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated). + - !ruby/object:Api::Type::Array + name: 'outputIndex' + description: | + Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. 
If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'instanceOutputValue' + description: | + Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index. + - !ruby/object:Api::Type::Integer + name: 'approximationError' + description: | + Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the ModelEvaluation. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The display name of the ModelEvaluation. 
+ + + + - !ruby/object:Api::Resource + name: ModelsEvaluation + base_url: '{{parent}}/evaluations' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A collection of metrics calculated by comparing Model's predictions on all of the test data against annotations from the test data. + properties: + + - !ruby/object:Api::Type::String + name: 'dataItemSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing EvaluatedDataItemView.data_item_payload and EvaluatedAnnotation.data_item_payload. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). This field is not populated if there are neither EvaluatedDataItemViews nor EvaluatedAnnotations under this ModelEvaluation. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from Managed Pipeline, metadata contains a structured value with keys of "pipeline_job_id", "evaluation_dataset_type", "evaluation_dataset_path". + - !ruby/object:Api::Type::String + name: 'metricsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). 
+ - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this ModelEvaluation was created. + - !ruby/object:Api::Type::String + name: 'annotationSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing EvaluatedDataItemView.predictions, EvaluatedDataItemView.ground_truths, EvaluatedAnnotation.predictions, and EvaluatedAnnotation.ground_truths. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). This field is not populated if there are neither EvaluatedDataItemViews nor EvaluatedAnnotations under this ModelEvaluation. + - !ruby/object:Api::Type::String + name: 'metrics' + description: | + Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri + - !ruby/object:Api::Type::Array + name: 'explanationSpecs' + description: | + Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'explanationSpec' + description: | + Specification of Model explanation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Parameters to configure explaining for Model's predictions. + properties: + - !ruby/object:Api::Type::Array + name: 'outputIndices' + description: | + If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e.g., multi-class Models that predict multiple classes).
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'examples' + description: | + Example-based explainability that returns the nearest neighbors from the provided dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'presets' + description: | + Preset configuration for example-based explanations + properties: + - !ruby/object:Api::Type::Enum + name: 'modality' + description: | + The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. + values: + - :MODALITY_UNSPECIFIED + - :IMAGE + - :TEXT + - :TABULAR + - !ruby/object:Api::Type::Enum + name: 'query' + description: | + Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. + values: + - :PRECISE + - :FAST + - !ruby/object:Api::Type::Integer + name: 'neighborCount' + description: | + The number of neighbors to return when querying for examples. + - !ruby/object:Api::Type::NestedObject + name: 'exampleGcsSource' + description: | + The Cloud Storage input instances. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'dataFormat' + description: | + The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. 
+ values: + - :DATA_FORMAT_UNSPECIFIED + - :JSONL + - !ruby/object:Api::Type::String + name: 'nearestNeighborSearchConfig' + description: | + The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + - !ruby/object:Api::Type::NestedObject + name: 'xraiAttribution' + description: | + An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. 
+ - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. 
Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::Integer + name: 'topK' + description: | + If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs. + - !ruby/object:Api::Type::NestedObject + name: 'integratedGradientsAttribution' + description: | + An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + properties: + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features.
Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. 
+ - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::NestedObject + name: 'sampledShapleyAttribution' + description: | + An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. + properties: + - !ruby/object:Api::Type::Integer + name: 'pathCount' + description: | + Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata describing the Model's input and output for explanation. + properties: + - !ruby/object:Api::Type::String + name: 'featureAttributionsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+ - !ruby/object:Api::Type::String + name: 'latentSpaceSource' + description: | + Name of the source to generate embeddings for example based explanations. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the prediction output to be explained. + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + - !ruby/object:Api::Type::String + name: 'explanationType' + description: | + Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` + - !ruby/object:Api::Type::Array + name: 'sliceDimensions' + description: | + All possible dimensions of ModelEvaluationSlices. 
The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = `. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'modelExplanation' + description: | + Aggregated explanation metrics for a Model over a set of instances. + properties: + - !ruby/object:Api::Type::Array + name: 'meanAttributions' + description: | + Output only. Aggregated attributions explaining the Model's prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attribution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'outputDisplayName' + description: | + Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must have the same shape as the explained output, and can be located using output_index. + - !ruby/object:Api::Type::Integer + name: 'baselineOutputValue' + description: | + Output only.
Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged. + - !ruby/object:Api::Type::String + name: 'outputName' + description: | + Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs. + - !ruby/object:Api::Type::String + name: 'featureAttributions' + description: | + Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated). + - !ruby/object:Api::Type::Array + name: 'outputIndex' + description: | + Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. 
If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'instanceOutputValue' + description: | + Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index. + - !ruby/object:Api::Type::Integer + name: 'approximationError' + description: | + Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the ModelEvaluation. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The display name of the ModelEvaluation. 
+ + + + - !ruby/object:Api::Resource + name: HyperparameterTuningJob + base_url: '{{parent}}/hyperparameterTuningJobs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a HyperparameterTuningJob. A HyperparameterTuningJob has a Study specification and multiple CustomJobs with identical CustomJob specification. + properties: + + - !ruby/object:Api::Type::String + name: 'studySpec' + description: | + Required. Study configuration of the HyperparameterTuningJob. + - !ruby/object:Api::Type::Array + name: 'trials' + description: | + Output only. Trials of the HyperparameterTuningJob. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the job. + values: + - :JOB_STATE_UNSPECIFIED + - :JOB_STATE_QUEUED + - :JOB_STATE_PENDING + - :JOB_STATE_RUNNING + - :JOB_STATE_SUCCEEDED + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLING + - :JOB_STATE_CANCELLED + - :JOB_STATE_PAUSED + - :JOB_STATE_EXPIRED + - :JOB_STATE_UPDATING + - :JOB_STATE_PARTIALLY_SUCCEEDED + - !ruby/object:Api::Type::Integer + name: 'maxFailedTrialCount' + description: | + The number of failed Trials that need to be seen before failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail before the whole job fails. 
+ - !ruby/object:Api::Type::String + name: 'encryptionSpec' + description: | + Customer-managed encryption key options for a HyperparameterTuningJob. If this is set, then all resources created by the HyperparameterTuningJob will be encrypted with the provided encryption key. + - !ruby/object:Api::Type::String + name: 'error' + description: | + Output only. Only populated when job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the HyperparameterTuningJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Time when the HyperparameterTuningJob was most recently updated. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the HyperparameterTuningJob for the first time entered the `JOB_STATE_RUNNING` state. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize HyperparameterTuningJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Time when the HyperparameterTuningJob was created. + - !ruby/object:Api::Type::Integer + name: 'parallelTrialCount' + description: | + Required. The desired number of Trials to run in parallel. + - !ruby/object:Api::Type::String + name: 'trialJobSpec' + description: | + Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. 
+ - !ruby/object:Api::Type::Integer + name: 'maxTrialCount' + description: | + Required. The desired total number of Trials. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the HyperparameterTuningJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the HyperparameterTuningJob. + + + + - !ruby/object:Api::Resource + name: StudiesTrial + base_url: '{{parent}}/trials' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A message representing a Trial. A Trial contains a unique set of Parameters that has been or will be evaluated, along with the objective metrics got by running the Trial. + properties: + + - !ruby/object:Api::Type::Array + name: 'measurements' + description: | + Output only. A list of measurements that are strictly lexicographically ordered by their induced tuples (steps, elapsed_duration). These are used for early stopping computations. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the Trial was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`. + - !ruby/object:Api::Type::Array + name: 'parameters' + description: | + Output only. The parameters of the Trial. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'. + - !ruby/object:Api::Type::String + name: 'parameterId' + description: | + Output only. The ID of the parameter. The parameter should be defined in StudySpec's Parameters. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the Trial assigned by the service. + - !ruby/object:Api::Type::String + name: 'infeasibleReason' + description: | + Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is `INFEASIBLE`. 
+ - !ruby/object:Api::Type::NestedObject + name: 'finalMeasurement' + description: | + A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'clientId' + description: | + Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique client_id. When a client asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client should evaluate the Trial, complete it, and report back to Vertex AI Vizier. If suggestion is asked again by same client_id before the Trial is completed, the same Trial will be returned. Multiple clients with different client_ids can ask for suggestions simultaneously, each of them will get their own Trial. + - !ruby/object:Api::Type::String + name: 'customJob' + description: | + Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's Trial. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the Trial. 
+ values: + - :STATE_UNSPECIFIED + - :REQUESTED + - :ACTIVE + - :STOPPING + - :SUCCEEDED + - :INFEASIBLE + - !ruby/object:Api::Type::NestedObject + name: 'webAccessUris' + description: | + Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if this trial is part of a HyperparameterTuningJob and the job's trial_job_spec.enable_web_access field is `true`. The keys are names of each node used for the trial; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. The identifier of the Trial assigned by the service. + + + + - !ruby/object:Api::Resource + name: StudiesTrial + base_url: '{{parent}}/trials' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A message representing a Trial. A Trial contains a unique set of Parameters that has been or will be evaluated, along with the objective metrics got by running the Trial. + properties: + + - !ruby/object:Api::Type::Array + name: 'measurements' + description: | + Output only. 
A list of measurements that are strictly lexicographically ordered by their induced tuples (steps, elapsed_duration). These are used for early stopping computations. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the Trial was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`. + - !ruby/object:Api::Type::Array + name: 'parameters' + description: | + Output only. The parameters of the Trial. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'. + - !ruby/object:Api::Type::String + name: 'parameterId' + description: | + Output only. The ID of the parameter. 
The parameter should be defined in StudySpec's Parameters. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the Trial assigned by the service. + - !ruby/object:Api::Type::String + name: 'infeasibleReason' + description: | + Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is `INFEASIBLE`. + - !ruby/object:Api::Type::NestedObject + name: 'finalMeasurement' + description: | + A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'clientId' + description: | + Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique client_id. When a client asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client should evaluate the Trial, complete it, and report back to Vertex AI Vizier. If suggestion is asked again by same client_id before the Trial is completed, the same Trial will be returned. 
Multiple clients with different client_ids can ask for suggestions simultaneously, each of them will get their own Trial. + - !ruby/object:Api::Type::String + name: 'customJob' + description: | + Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's Trial. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the Trial. + values: + - :STATE_UNSPECIFIED + - :REQUESTED + - :ACTIVE + - :STOPPING + - :SUCCEEDED + - :INFEASIBLE + - !ruby/object:Api::Type::NestedObject + name: 'webAccessUris' + description: | + Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if this trial is part of a HyperparameterTuningJob and the job's trial_job_spec.enable_web_access field is `true`. The keys are names of each node used for the trial; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. The identifier of the Trial assigned by the service. 
+ + + + - !ruby/object:Api::Resource + name: FeaturestoresEntityType + base_url: '{{parent}}/entityTypes' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An entity type is a type of object in a system that needs to be modeled and have stored information about. For example, driver is an entity type, and driver0 is an instance of an entity type driver. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one EntityType (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the EntityType. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. Name of the EntityType. 
Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this EntityType was created. + - !ruby/object:Api::Type::NestedObject + name: 'monitoringConfig' + description: | + Configuration of how features in Featurestore are monitored. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'importFeaturesAnalysis' + description: | + Configuration of the Featurestore's ImportFeature Analysis Based Monitoring. This type of analysis generates statistics for values of each Feature imported by every ImportFeatureValues operation. + properties: + - !ruby/object:Api::Type::Enum + name: 'anomalyDetectionBaseline' + description: | + The baseline used to do anomaly detection for the statistics generated by import features analysis. + values: + - :BASELINE_UNSPECIFIED + - :LATEST_STATS + - :MOST_RECENT_SNAPSHOT_STATS + - :PREVIOUS_IMPORT_FEATURES_STATS + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Whether to enable / disable / inherite default hebavior for import features analysis. + values: + - :STATE_UNSPECIFIED + - :DEFAULT + - :ENABLED + - :DISABLED + - !ruby/object:Api::Type::NestedObject + name: 'numericalThresholdConfig' + description: | + The config for Featurestore Monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. 
Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + - !ruby/object:Api::Type::NestedObject + name: 'categoricalThresholdConfig' + description: | + The config for Featurestore Monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + - !ruby/object:Api::Type::NestedObject + name: 'snapshotAnalysis' + description: | + Configuration of the Featurestore's Snapshot Analysis Based Monitoring. This type of analysis generates statistics for each Feature based on a snapshot of the latest feature value of each entities every monitoring_interval. + properties: + - !ruby/object:Api::Type::Integer + name: 'monitoringIntervalDays' + description: | + Configuration of the snapshot analysis based monitoring pipeline running interval. The value indicates number of days. + - !ruby/object:Api::Type::Integer + name: 'stalenessDays' + description: | + Customized export features time window for snapshot analysis. Unit is one day. Default value is 3 weeks. Minimum value is 1 day. Maximum value is 4000 days. + - !ruby/object:Api::Type::Boolean + name: 'disabled' + description: | + The monitoring schedule for snapshot analysis. For EntityType-level config: unset / disabled = true indicates disabled by default for Features under it; otherwise by default enable snapshot analysis monitoring with monitoring_interval for Features under it. 
Feature-level config: disabled = true indicates disabled regardless of the EntityType-level config; unset monitoring_interval indicates going with EntityType-level config; otherwise run snapshot analysis monitoring with monitoring_interval regardless of the EntityType-level config. Explicitly Disable the snapshot analysis based monitoring. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Optional. Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this EntityType was most recently updated. + - !ruby/object:Api::Type::Integer + name: 'offlineStorageTtlDays' + description: | + Optional. Config for data retention policy in offline storage. TTL in days for feature values that will be stored in offline storage. The Feature Store offline storage periodically removes obsolete feature values older than `offline_storage_ttl_days` since the feature generation time. If unset (or explicitly set to 0), default to 4000 days TTL. + + + + - !ruby/object:Api::Resource + name: FeaturestoresEntityType + base_url: '{{parent}}/entityTypes' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An entity type is a type of object in a system that needs to be modeled and have stored information about. 
For example, driver is an entity type, and driver0 is an instance of an entity type driver. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one EntityType (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'description' + description: | + Optional. Description of the EntityType. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. Name of the EntityType. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this EntityType was created. + - !ruby/object:Api::Type::NestedObject + name: 'monitoringConfig' + description: | + Configuration of how features in Featurestore are monitored. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'importFeaturesAnalysis' + description: | + Configuration of the Featurestore's ImportFeature Analysis Based Monitoring. 
This type of analysis generates statistics for values of each Feature imported by every ImportFeatureValues operation. + properties: + - !ruby/object:Api::Type::Enum + name: 'anomalyDetectionBaseline' + description: | + The baseline used to do anomaly detection for the statistics generated by import features analysis. + values: + - :BASELINE_UNSPECIFIED + - :LATEST_STATS + - :MOST_RECENT_SNAPSHOT_STATS + - :PREVIOUS_IMPORT_FEATURES_STATS + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Whether to enable / disable / inherite default hebavior for import features analysis. + values: + - :STATE_UNSPECIFIED + - :DEFAULT + - :ENABLED + - :DISABLED + - !ruby/object:Api::Type::NestedObject + name: 'numericalThresholdConfig' + description: | + The config for Featurestore Monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + - !ruby/object:Api::Type::NestedObject + name: 'categoricalThresholdConfig' + description: | + The config for Featurestore Monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. 
+ - !ruby/object:Api::Type::NestedObject + name: 'snapshotAnalysis' + description: | + Configuration of the Featurestore's Snapshot Analysis Based Monitoring. This type of analysis generates statistics for each Feature based on a snapshot of the latest feature value of each entities every monitoring_interval. + properties: + - !ruby/object:Api::Type::Integer + name: 'monitoringIntervalDays' + description: | + Configuration of the snapshot analysis based monitoring pipeline running interval. The value indicates number of days. + - !ruby/object:Api::Type::Integer + name: 'stalenessDays' + description: | + Customized export features time window for snapshot analysis. Unit is one day. Default value is 3 weeks. Minimum value is 1 day. Maximum value is 4000 days. + - !ruby/object:Api::Type::Boolean + name: 'disabled' + description: | + The monitoring schedule for snapshot analysis. For EntityType-level config: unset / disabled = true indicates disabled by default for Features under it; otherwise by default enable snapshot analysis monitoring with monitoring_interval for Features under it. Feature-level config: disabled = true indicates disabled regardless of the EntityType-level config; unset monitoring_interval indicates going with EntityType-level config; otherwise run snapshot analysis monitoring with monitoring_interval regardless of the EntityType-level config. Explicitly Disable the snapshot analysis based monitoring. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Optional. Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this EntityType was most recently updated. + - !ruby/object:Api::Type::Integer + name: 'offlineStorageTtlDays' + description: | + Optional. Config for data retention policy in offline storage. TTL in days for feature values that will be stored in offline storage. 
The Feature Store offline storage periodically removes obsolete feature values older than `offline_storage_ttl_days` since the feature generation time. If unset (or explicitly set to 0), default to 4000 days TTL. + + + + - !ruby/object:Api::Resource + name: IndexEndpoint + base_url: '{{parent}}/indexEndpoints' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes. + properties: + + - !ruby/object:Api::Type::Array + name: 'deployedIndexes' + description: | + Output only. The indexes deployed in this endpoint. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'privateEndpoints' + description: | + IndexPrivateEndpoints proto is used to provide paths for users to send requests via private endpoints (e.g. private service access, private service connect). To send request via private service access, use match_grpc_address. To send request via private service connect, use service_attachment. + properties: + - !ruby/object:Api::Type::String + name: 'serviceAttachment' + description: | + Output only. The name of the service attachment resource. Populated if private service connect is enabled. + - !ruby/object:Api::Type::String + name: 'matchGrpcAddress' + description: | + Output only. The ip address used to send match gRPC requests. 
+ - !ruby/object:Api::Type::String + name: 'deploymentGroup' + description: | + Optional. The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. Creating `deployment_groups` with `reserved_ip_ranges` is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. Note: we only support up to 5 deployment groups(not including 'default'). + - !ruby/object:Api::Type::NestedObject + name: 'dedicatedResources' + description: | + A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'machineSpec' + description: | + Specification of a single machine. + properties: + - !ruby/object:Api::Type::Enum + name: 'acceleratorType' + description: | + Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + values: + - :ACCELERATOR_TYPE_UNSPECIFIED + - :NVIDIA_TESLA_K80 + - :NVIDIA_TESLA_P100 + - :NVIDIA_TESLA_V100 + - :NVIDIA_TESLA_P4 + - :NVIDIA_TESLA_T4 + - :NVIDIA_TESLA_A100 + - :NVIDIA_A100_80GB + - :NVIDIA_L4 + - :TPU_V2 + - :TPU_V3 + - :TPU_V4_POD + - !ruby/object:Api::Type::String + name: 'machineType' + description: | + Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). 
For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + - !ruby/object:Api::Type::Integer + name: 'acceleratorCount' + description: | + The number of accelerators to attach to the machine. + - !ruby/object:Api::Type::Integer + name: 'maxReplicaCount' + description: | + Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). + - !ruby/object:Api::Type::Array + name: 'autoscalingMetricSpecs' + description: | + Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. 
For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'target' + description: | + The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. + - !ruby/object:Api::Type::String + name: 'metricName' + description: | + Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` + - !ruby/object:Api::Type::Integer + name: 'minReplicaCount' + description: | + Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. + - !ruby/object:Api::Type::NestedObject + name: 'deployedIndexAuthConfig' + description: | + Used to set up the auth on the DeployedIndex's private endpoint. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'authProvider' + description: | + Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). + properties: + - !ruby/object:Api::Type::Array + name: 'audiences' + description: | + The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. 
A JWT containing any of these audiences will be accepted. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'allowedIssuers' + description: | + A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: `service-account-name@project-id.iam.gserviceaccount.com` + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'id' + description: | + Required. The user specified ID of the DeployedIndex. The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in. + - !ruby/object:Api::Type::String + name: 'indexSyncTime' + description: | + Output only. The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the Index.update_time of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must list the operations that are running on the original Index. Only the successfully completed Operations with update_time equal or before this sync time are contained in this DeployedIndex. + - !ruby/object:Api::Type::NestedObject + name: 'automaticResources' + description: | + A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxReplicaCount' + description: | + Immutable. 
The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. + - !ruby/object:Api::Type::Integer + name: 'minReplicaCount' + description: | + Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + - !ruby/object:Api::Type::Boolean + name: 'enableAccessLogging' + description: | + Optional. If true, private endpoint's access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each MatchRequest. Note that logs may incur a cost, especially if the deployed index receives a high queries per second rate (QPS). Estimate your costs before enabling this option. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when the DeployedIndex was created. + - !ruby/object:Api::Type::Array + name: 'reservedIpRanges' + description: | + Optional. A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. 
The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: 'vertex-ai-ip-range'. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'index' + description: | + Required. The name of the Index this is the deployment of. We may refer to this Index as the DeployedIndex's "original" Index. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The display name of the DeployedIndex. If not provided upon creation, the Index's display_name is used. + - !ruby/object:Api::Type::NestedObject + name: 'privateServiceConnectConfig' + description: | + Represents configuration for private service connect. + properties: + - !ruby/object:Api::Type::Array + name: 'projectAllowlist' + description: | + A list of Projects from which the forwarding rule will target the service attachment. + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Required. If true, expose the IndexEndpoint via private service connect. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the IndexEndpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::Boolean + name: 'publicEndpointEnabled' + description: | + Optional. If true, the deployed index will be accessible through public endpoint. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your IndexEndpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this IndexEndpoint was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the IndexEndpoint. + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the IndexEndpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. network and private_service_connect_config are mutually exclusive. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in '12345', and {network} is network name. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this IndexEndpoint was last updated. This timestamp is not updated when the endpoint's DeployedIndexes are updated, e.g. due to updates of the original Indexes they are the deployments of. + - !ruby/object:Api::Type::String + name: 'publicEndpointDomainName' + description: | + Output only. If public_endpoint_enabled is true, this field will be populated with the domain name to use for this index endpoint. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Optional. Deprecated: If true, expose the IndexEndpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the IndexEndpoint. + + + + - !ruby/object:Api::Resource + name: PipelineJob + base_url: '{{parent}}/pipelineJobs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An instance of a machine learning PipelineJob. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Pipeline end time. + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this PipelineJob was most recently updated. + - !ruby/object:Api::Type::NestedObject + name: 'jobDetail' + description: | + The runtime detail of PipelineJob. + properties: + - !ruby/object:Api::Type::Array + name: 'taskDetails' + description: | + Output only. The runtime details of the tasks under the pipeline. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Output only. The runtime input artifacts of the task. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + A list of artifact metadata. + - !ruby/object:Api::Type::Array + name: 'pipelineTaskStatus' + description: | + Output only. A list of task status. This field keeps a record of task status evolving over time. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Update time of this status. 
+ - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the task. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :RUNNING + - :SUCCEEDED + - :CANCEL_PENDING + - :CANCELLING + - :CANCELLED + - :FAILED + - :SKIPPED + - :NOT_TRIGGERED + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Task end time. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Output only. The runtime output artifacts of the task. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + A list of artifact metadata. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Task create time. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Task start time. 
+ - !ruby/object:Api::Type::NestedObject + name: 'execution' + description: | + Instance of a general execution. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Execution was created. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. + values: + - :STATE_UNSPECIFIED + - :NEW + - :RUNNING + - :COMPLETE + - :FAILED + - :CACHED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Execution. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Execution. 
May be up to 128 Unicode characters. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Execution + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Execution was last updated. + - !ruby/object:Api::Type::String + name: 'taskName' + description: | + Output only. The user specified name of the task that is defined in pipeline_spec. + - !ruby/object:Api::Type::String + name: 'parentTaskId' + description: | + Output only. The id of the parent task if the task is within a component scope. Empty if the task is at the root level. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. State of the task. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :RUNNING + - :SUCCEEDED + - :CANCEL_PENDING + - :CANCELLING + - :CANCELLED + - :FAILED + - :SKIPPED + - :NOT_TRIGGERED + - !ruby/object:Api::Type::String + name: 'taskId' + description: | + Output only. The system generated ID of the task. + - !ruby/object:Api::Type::NestedObject + name: 'executorDetail' + description: | + The runtime detail of a pipeline executor. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'containerDetail' + description: | + The detail of a container execution. 
It contains the job names of the lifecycle of a container execution. + properties: + - !ruby/object:Api::Type::Array + name: 'failedMainJobs' + description: | + Output only. The names of the previously failed CustomJob for the main container executions. The list includes the all attempts in chronological order. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'mainJob' + description: | + Output only. The name of the CustomJob for the main container execution. + - !ruby/object:Api::Type::String + name: 'preCachingCheckJob' + description: | + Output only. The name of the CustomJob for the pre-caching-check container execution. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. + - !ruby/object:Api::Type::Array + name: 'failedPreCachingCheckJobs' + description: | + Output only. The names of the previously failed CustomJob for the pre-caching-check container executions. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. The list includes the all attempts in chronological order. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'customJobDetail' + description: | + The detailed info for a custom job executor. + properties: + - !ruby/object:Api::Type::Array + name: 'failedJobs' + description: | + Output only. The names of the previously failed CustomJob. The list includes the all attempts in chronological order. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'job' + description: | + Output only. The name of the CustomJob. + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). 
Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'pipelineRunContext' + description: | + Instance of a general context. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the Context. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Context + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Context. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. 
Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Context was created. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Context was last updated. + - !ruby/object:Api::Type::Array + name: 'parentContexts' + description: | + Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'pipelineContext' + description: | + Instance of a general context. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the Context. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. 
Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Context + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Context. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Context was created. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. 
+ - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Context was last updated. + - !ruby/object:Api::Type::Array + name: 'parentContexts' + description: | + Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'templateMetadata' + description: | + Pipeline template metadata if PipelineJob.template_uri is from supported template registry. Currently, the only supported registry is Artifact Registry. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + The version_name in artifact registry. Will always be presented in output if the PipelineJob.template_uri is from supported template registry. Format is "sha256:abcdef123456...". + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the job. + values: + - :PIPELINE_STATE_UNSPECIFIED + - :PIPELINE_STATE_QUEUED + - :PIPELINE_STATE_PENDING + - :PIPELINE_STATE_RUNNING + - :PIPELINE_STATE_SUCCEEDED + - :PIPELINE_STATE_FAILED + - :PIPELINE_STATE_CANCELLING + - :PIPELINE_STATE_CANCELLED + - :PIPELINE_STATE_PAUSED + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Pipeline creation time. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the PipelineJob. + - !ruby/object:Api::Type::String + name: 'scheduleName' + description: | + Output only. The schedule resource name. Only returned if the Pipeline is created by Schedule API. + - !ruby/object:Api::Type::Array + name: 'reservedIpRanges' + description: | + A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. 
Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Pipeline start time. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + The service account that the pipeline workload runs as. If not specified, the Compute Engine default service account in the project will be used. See https://cloud.google.com/compute/docs/access/service-accounts#default_service_account Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The display name of the Pipeline. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'templateUri' + description: | + A template uri from where the PipelineJob.pipeline_spec, if empty, will be downloaded. + - !ruby/object:Api::Type::NestedObject + name: 'pipelineSpec' + description: | + The spec of the pipeline. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'network' + description: | + The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. 
If left unspecified, the workload is not peered with any network. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize PipelineJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. Note there is some reserved label key for Vertex AI Pipelines. - `vertex-ai-pipelines-run-billing-id`, user set value will get overrided. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'runtimeConfig' + description: | + The runtime config of a PipelineJob. + properties: + - !ruby/object:Api::Type::String + name: 'gcsOutputDirectory' + description: | + Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. + - !ruby/object:Api::Type::NestedObject + name: 'parameterValues' + description: | + The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as pipelines built using Kubeflow Pipelines SDK 1.9 or higher and the v2 DSL. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Enum + name: 'failurePolicy' + description: | + Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. + values: + - :PIPELINE_FAILURE_POLICY_UNSPECIFIED + - :PIPELINE_FAILURE_POLICY_FAIL_SLOW + - :PIPELINE_FAILURE_POLICY_FAIL_FAST + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Deprecated. Use RuntimeConfig.parameter_values instead. The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower, such as pipelines built using Kubeflow Pipelines SDK 1.8 or lower. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Value is the value of the field. + - !ruby/object:Api::Type::NestedObject + name: 'inputArtifacts' + description: | + The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The type of an input artifact. 
+ + + + - !ruby/object:Api::Resource + name: Schedule + base_url: '{{parent}}/schedules' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + An instance of a Schedule periodically schedules runs to make API calls based on user specified time specification and API request type. + properties: + + - !ruby/object:Api::Type::String + name: 'startedRunCount' + description: | + Output only. The number of runs started by this schedule. + - !ruby/object:Api::Type::Boolean + name: 'allowQueueing' + description: | + Optional. Whether new scheduled runs can be queued when max_concurrent_runs limit is reached. If set to true, new runs will be queued instead of skipped. Default to false. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the Schedule. + - !ruby/object:Api::Type::String + name: 'cron' + description: | + Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or "TZ=${IANA_TIME_ZONE}". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". + - !ruby/object:Api::Type::String + name: 'lastPauseTime' + description: | + Output only. Timestamp when this Schedule was last paused. Unset if never paused. 
+ - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Schedule was created. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Optional. Timestamp after which the first run can be scheduled. Default to Schedule create time if not specified. + - !ruby/object:Api::Type::String + name: 'maxRunCount' + description: | + Optional. Maximum run count of the schedule. If specified, The schedule will be completed when either started_run_count >= max_run_count or when end_time is reached. If not specified, new runs will keep getting scheduled until this Schedule is paused or deleted. Already scheduled runs will be allowed to complete. Unset if not specified. + - !ruby/object:Api::Type::String + name: 'nextRunTime' + description: | + Output only. Timestamp when this Schedule should schedule the next run. Having a next_run_time in the past means the runs are being started behind schedule. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Schedule was updated. + - !ruby/object:Api::Type::NestedObject + name: 'lastScheduledRunResponse' + description: | + Status of a scheduled run. + properties: + - !ruby/object:Api::Type::String + name: 'runResponse' + description: | + The response of the scheduled run. + - !ruby/object:Api::Type::String + name: 'scheduledRunTime' + description: | + The scheduled run time based on the user-specified schedule. + - !ruby/object:Api::Type::String + name: 'lastResumeTime' + description: | + Output only. Timestamp when this Schedule was last resumed. Unset if never resumed from pause. + - !ruby/object:Api::Type::String + name: 'maxConcurrentRunCount' + description: | + Required. Maximum number of runs that can be started concurrently for this Schedule. This is the limit for starting the scheduled requests and not the execution of the operations/jobs created by the requests (if applicable). 
+ - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of this Schedule. + values: + - :STATE_UNSPECIFIED + - :ACTIVE + - :PAUSED + - :COMPLETED + - !ruby/object:Api::Type::NestedObject + name: 'createPipelineJobRequest' + description: | + Request message for PipelineService.CreatePipelineJob. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'pipelineJob' + description: | + An instance of a machine learning PipelineJob. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Pipeline end time. + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this PipelineJob was most recently updated. + - !ruby/object:Api::Type::NestedObject + name: 'jobDetail' + description: | + The runtime detail of PipelineJob. + properties: + - !ruby/object:Api::Type::Array + name: 'taskDetails' + description: | + Output only. The runtime details of the tasks under the pipeline. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Output only. The runtime input artifacts of the task. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + A list of artifact metadata. + - !ruby/object:Api::Type::Array + name: 'pipelineTaskStatus' + description: | + Output only. A list of task status. This field keeps a record of task status evolving over time. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Update time of this status. + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The state of the task. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :RUNNING + - :SUCCEEDED + - :CANCEL_PENDING + - :CANCELLING + - :CANCELLED + - :FAILED + - :SKIPPED + - :NOT_TRIGGERED + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Task end time. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Output only. The runtime output artifacts of the task. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + A list of artifact metadata. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Task create time. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Task start time. + - !ruby/object:Api::Type::NestedObject + name: 'execution' + description: | + Instance of a general execution. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Executions. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Execution was created. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. + values: + - :STATE_UNSPECIFIED + - :NEW + - :RUNNING + - :COMPLETE + - :FAILED + - :CACHED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Execution. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Execution. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Execution + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Execution was last updated. + - !ruby/object:Api::Type::String + name: 'taskName' + description: | + Output only. The user specified name of the task that is defined in pipeline_spec. + - !ruby/object:Api::Type::String + name: 'parentTaskId' + description: | + Output only. The id of the parent task if the task is within a component scope. Empty if the task is at the root level. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. State of the task. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :RUNNING + - :SUCCEEDED + - :CANCEL_PENDING + - :CANCELLING + - :CANCELLED + - :FAILED + - :SKIPPED + - :NOT_TRIGGERED + - !ruby/object:Api::Type::String + name: 'taskId' + description: | + Output only. The system generated ID of the task. + - !ruby/object:Api::Type::NestedObject + name: 'executorDetail' + description: | + The runtime detail of a pipeline executor. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'containerDetail' + description: | + The detail of a container execution. It contains the job names of the lifecycle of a container execution. + properties: + - !ruby/object:Api::Type::Array + name: 'failedMainJobs' + description: | + Output only. The names of the previously failed CustomJob for the main container executions. The list includes the all attempts in chronological order. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'mainJob' + description: | + Output only. The name of the CustomJob for the main container execution. + - !ruby/object:Api::Type::String + name: 'preCachingCheckJob' + description: | + Output only. The name of the CustomJob for the pre-caching-check container execution. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. + - !ruby/object:Api::Type::Array + name: 'failedPreCachingCheckJobs' + description: | + Output only. The names of the previously failed CustomJob for the pre-caching-check container executions. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. The list includes the all attempts in chronological order. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'customJobDetail' + description: | + The detailed info for a custom job executor. + properties: + - !ruby/object:Api::Type::Array + name: 'failedJobs' + description: | + Output only. The names of the previously failed CustomJob. The list includes the all attempts in chronological order. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'job' + description: | + Output only. The name of the CustomJob. + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. 
There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'pipelineRunContext' + description: | + Instance of a general context. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the Context. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Context + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Context. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Context was created. 
+ - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Context was last updated. + - !ruby/object:Api::Type::Array + name: 'parentContexts' + description: | + Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'pipelineContext' + description: | + Instance of a general context. + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the Context. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Context + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Context. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Context was created. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Context was last updated. + - !ruby/object:Api::Type::Array + name: 'parentContexts' + description: | + Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'templateMetadata' + description: | + Pipeline template metadata if PipelineJob.template_uri is from supported template registry. Currently, the only supported registry is Artifact Registry. + properties: + - !ruby/object:Api::Type::String + name: 'version' + description: | + The version_name in artifact registry. Will always be presented in output if the PipelineJob.template_uri is from supported template registry. Format is "sha256:abcdef123456...". + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the job. + values: + - :PIPELINE_STATE_UNSPECIFIED + - :PIPELINE_STATE_QUEUED + - :PIPELINE_STATE_PENDING + - :PIPELINE_STATE_RUNNING + - :PIPELINE_STATE_SUCCEEDED + - :PIPELINE_STATE_FAILED + - :PIPELINE_STATE_CANCELLING + - :PIPELINE_STATE_CANCELLED + - :PIPELINE_STATE_PAUSED + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Pipeline creation time. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the PipelineJob. + - !ruby/object:Api::Type::String + name: 'scheduleName' + description: | + Output only. The schedule resource name. Only returned if the Pipeline is created by Schedule API. + - !ruby/object:Api::Type::Array + name: 'reservedIpRanges' + description: | + A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Pipeline start time. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: | + The service account that the pipeline workload runs as. 
If not specified, the Compute Engine default service account in the project will be used. See https://cloud.google.com/compute/docs/access/service-accounts#default_service_account Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The display name of the Pipeline. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'templateUri' + description: | + A template uri from where the PipelineJob.pipeline_spec, if empty, will be downloaded. + - !ruby/object:Api::Type::NestedObject + name: 'pipelineSpec' + description: | + The spec of the pipeline. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'network' + description: | + The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize PipelineJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. 
See https://goo.gl/xmQnxf for more information and examples of labels. Note there is some reserved label key for Vertex AI Pipelines. - `vertex-ai-pipelines-run-billing-id`, user set value will get overrided. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'runtimeConfig' + description: | + The runtime config of a PipelineJob. + properties: + - !ruby/object:Api::Type::String + name: 'gcsOutputDirectory' + description: | + Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. The artifact paths are generated with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. + - !ruby/object:Api::Type::NestedObject + name: 'parameterValues' + description: | + The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as pipelines built using Kubeflow Pipelines SDK 1.9 or higher and the v2 DSL. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Enum + name: 'failurePolicy' + description: | + Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. 
+ values: + - :PIPELINE_FAILURE_POLICY_UNSPECIFIED + - :PIPELINE_FAILURE_POLICY_FAIL_SLOW + - :PIPELINE_FAILURE_POLICY_FAIL_FAST + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Deprecated. Use RuntimeConfig.parameter_values instead. The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower, such as pipelines built using Kubeflow Pipelines SDK 1.8 or lower. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Value is the value of the field. + - !ruby/object:Api::Type::NestedObject + name: 'inputArtifacts' + description: | + The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The type of an input artifact. + - !ruby/object:Api::Type::String + name: 'pipelineJobId' + description: | + The ID to use for the PipelineJob, which will become the final component of the PipelineJob name. If not provided, an ID will be automatically generated. This value should be less than 128 characters, and valid characters are /a-z-/. + - !ruby/object:Api::Type::String + name: 'parent' + description: | + Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. User provided name of the Schedule. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::Boolean + name: 'catchUp' + description: | + Output only. Whether to backfill missed runs when the schedule is resumed from PAUSED state. If set to true, all missed runs will be scheduled. 
New runs will be scheduled after the backfill is complete. Default to false. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Optional. Timestamp after which no new runs can be scheduled. If specified, The schedule will be completed when either end_time is reached or when scheduled_run_count >= max_run_count. If not specified, new runs will keep getting scheduled until this Schedule is paused or deleted. Already scheduled runs will be allowed to complete. Unset if not specified. + + + + - !ruby/object:Api::Resource + name: Tensorboard + base_url: '{{parent}}/tensorboards' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a Google Cloud project. If needed users can also create extra Tensorboards in their projects. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the Tensorboard. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` + - !ruby/object:Api::Type::Boolean + name: 'isDefault' + description: | + Used to indicate if the TensorBoard instance is the default one. Each project & region can have at most one default TensorBoard instance. 
Creation of a default TensorBoard instance and updating an existing TensorBoard instance to be default will mark all other TensorBoard instances (if any) as non default. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Tensorboard was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Tensorboards. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Tensorboard (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'blobStoragePathPrefix' + description: | + Output only. Consumer project Cloud Storage path prefix used to store blob data, which can either be a bucket or directory. Does not end with a '/'. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Tensorboard was created. + - !ruby/object:Api::Type::Integer + name: 'runCount' + description: | + Output only. The number of Runs stored in this Tensorboard. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. 
The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. User provided name of this Tensorboard. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of this Tensorboard. + + + + - !ruby/object:Api::Resource + name: Model + base_url: '{{parent}}/models' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A trained machine learning Model. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'modelSourceInfo' + description: | + Detail description of the source information of the model. + properties: + - !ruby/object:Api::Type::Boolean + name: 'copy' + description: | + If this Model is copy of another Model. If true then source_type pertains to the original. + - !ruby/object:Api::Type::Enum + name: 'sourceType' + description: | + Type of the model source. + values: + - :MODEL_SOURCE_TYPE_UNSPECIFIED + - :AUTOML + - :CUSTOM + - :BQML + - :MODEL_GARDEN + - :GENIE + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the Model. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Immutable. 
An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Model was most recently updated. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Model. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'deployedModelId' + description: | + Immutable. An ID of a DeployedModel in the above Endpoint. + - !ruby/object:Api::Type::String + name: 'endpoint' + description: | + Immutable. A resource name of an Endpoint. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Model was uploaded into Vertex AI. + - !ruby/object:Api::Type::NestedObject + name: 'explanationSpec' + description: | + Specification of Model explanation. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'parameters' + description: | + Parameters to configure explaining for Model's predictions. + properties: + - !ruby/object:Api::Type::Array + name: 'outputIndices' + description: | + If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. 
Only applicable to Models that predict multiple outputs (e.g., multi-class Models that predict multiple classes). + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'examples' + description: | + Example-based explainability that returns the nearest neighbors from the provided dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'presets' + description: | + Preset configuration for example-based explanations + properties: + - !ruby/object:Api::Type::Enum + name: 'modality' + description: | + The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. + values: + - :MODALITY_UNSPECIFIED + - :IMAGE + - :TEXT + - :TABULAR + - !ruby/object:Api::Type::Enum + name: 'query' + description: | + Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. + values: + - :PRECISE + - :FAST + - !ruby/object:Api::Type::Integer + name: 'neighborCount' + description: | + The number of neighbors to return when querying for examples. + - !ruby/object:Api::Type::NestedObject + name: 'exampleGcsSource' + description: | + The Cloud Storage input instances. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'dataFormat' + description: | + The format in which instances are given, if not specified, assume it's JSONL format. 
Currently only JSONL format is supported. + values: + - :DATA_FORMAT_UNSPECIFIED + - :JSONL + - !ruby/object:Api::Type::String + name: 'nearestNeighborSearchConfig' + description: | + The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + - !ruby/object:Api::Type::NestedObject + name: 'xraiAttribution' + description: | + An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. 
+ - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. 
Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::Integer + name: 'topK' + description: | + If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs. + - !ruby/object:Api::Type::NestedObject + name: 'integratedGradientsAttribution' + description: | + An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + properties: + - !ruby/object:Api::Type::Integer + name: 'stepCount' + description: | + Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'smoothGradConfig' + description: | + Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + properties: + - !ruby/object:Api::Type::NestedObject + name: 'featureNoiseSigma' + description: | + Noise sigma by features. 
Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. + properties: + - !ruby/object:Api::Type::Array + name: 'noiseSigma' + description: | + Noise sigma per feature. No noise is added to features that are not set. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + - !ruby/object:Api::Type::Integer + name: 'sigma' + description: | + This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + - !ruby/object:Api::Type::Integer + name: 'noiseSigma' + description: | + This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. + - !ruby/object:Api::Type::Integer + name: 'noisySampleCount' + description: | + The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. 
+ - !ruby/object:Api::Type::NestedObject + name: 'blurBaselineConfig' + description: | + Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + properties: + - !ruby/object:Api::Type::Integer + name: 'maxBlurSigma' + description: | + The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + - !ruby/object:Api::Type::NestedObject + name: 'sampledShapleyAttribution' + description: | + An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. + properties: + - !ruby/object:Api::Type::Integer + name: 'pathCount' + description: | + Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Metadata describing the Model's input and output for explanation. + properties: + - !ruby/object:Api::Type::String + name: 'featureAttributionsSchemaUri' + description: | + Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+ - !ruby/object:Api::Type::String + name: 'latentSpaceSource' + description: | + Name of the source to generate embeddings for example based explanations. + - !ruby/object:Api::Type::NestedObject + name: 'outputs' + description: | + Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the prediction output to be explained. + - !ruby/object:Api::Type::NestedObject + name: 'inputs' + description: | + Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. 
Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'pipelineJob' + description: | + Optional. This field is populated if the model is produced by a pipeline job. + - !ruby/object:Api::Type::NestedObject + name: 'predictSchemata' + description: | + Contains the schemata used in Model's predictions and explanations via PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. + properties: + - !ruby/object:Api::Type::String + name: 'instanceSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in PredictRequest.instances, ExplainRequest.instances and BatchPredictionJob.input_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'parametersSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via PredictRequest.parameters, ExplainRequest.parameters and BatchPredictionJob.model_parameters. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no parameters are supported, then it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. 
The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'predictionSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via PredictResponse.predictions, ExplainResponse.explanations, and BatchPredictionJob.output_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::String + name: 'versionUpdateTime' + description: | + Output only. Timestamp when this version was most recently updated. + - !ruby/object:Api::Type::Array + name: 'supportedExportFormats' + description: | + Output only. The formats in which this Model may be exported. If empty, this Model is not available for export. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'exportableContents' + description: | + Output only. The content of this Model that may be exported. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. The ID of the export format. The possible format IDs are: * `tflite` Used for Android mobile devices. * `edgetpu-tflite` Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices. * `tf-saved-model` A tensorflow model in SavedModel format. * `tf-js` A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used in the browser and in Node.js using JavaScript. * `core-ml` Used for iOS mobile devices. * `custom-trained` A Model that was uploaded or trained by custom code. 
+ - !ruby/object:Api::Type::NestedObject + name: 'originalModelInfo' + description: | + Contains information about the original Model if this Model is a copy. + properties: + - !ruby/object:Api::Type::String + name: 'model' + description: | + Output only. The resource name of the Model this Model is a copy of, including the revision. Format: `projects/{project}/locations/{location}/models/{model_id}@{version_id}` + - !ruby/object:Api::Type::String + name: 'metadataArtifact' + description: | + Output only. The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + - !ruby/object:Api::Type::Array + name: 'supportedInputStorageFormats' + description: | + Output only. The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'metadataSchemaUri' + description: | + Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + - !ruby/object:Api::Type::NestedObject + name: 'containerSpec' + description: | + Specification of a container for serving predictions. Some fields in this message correspond to fields in the [Kubernetes Container v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + properties: + - !ruby/object:Api::Type::String + name: 'predictRoute' + description: | + Immutable. HTTP path on the container to send prediction requests to. Vertex AI forwards requests sent using projects.locations.endpoints.predict to this path on the container's IP address and port. Vertex AI then returns the container's response in the API response. For example, if you set this field to `/foo`, then when Vertex AI receives a prediction request, it forwards the request body in a POST request to the `/foo` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. 
If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + - !ruby/object:Api::Type::String + name: 'imageUri' + description: | + Required. Immutable. URI of the Docker image to be used as the custom container for serving predictions. This URI must identify an image in Artifact Registry or Container Registry. Learn more about the [container publishing requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), including permissions requirements for the Vertex AI Service Agent. The container image is ingested upon ModelService.UploadModel, stored internally, and this original path is afterwards not used. To learn about the requirements for the Docker image itself, see [Custom container requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). You can use the URI to one of Vertex AI's [pre-built container images for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) in this field. + - !ruby/object:Api::Type::Array + name: 'env' + description: | + Immutable. List of environment variables to set in the container. 
After the container starts running, code running in the container can read these environment variables. Additionally, the command and args fields can reference these variables. Later entries in this list can also reference earlier entries. For example, the following example sets the variable `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": "foo" }, { "name": "VAR_2", "value": "$(VAR_1) bar" } ] ``` If you switch the order of the variables in the example, then the expansion does not occur. This field corresponds to the `env` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Required. Name of the environment variable. Must be a valid C identifier. + - !ruby/object:Api::Type::String + name: 'value' + description: | + Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + - !ruby/object:Api::Type::Array + name: 'args' + description: | + Immutable. Specifies arguments for the command that runs when the container starts. This overrides the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify this field as an array of executable and arguments, similar to a Docker `CMD`'s "default parameters" form. If you don't specify this field but do specify the command field, then the command from the `command` field runs without any additional arguments. 
See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). If you don't specify this field and don't specify the `command` field, then the container's [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and `CMD` determine what runs based on their default behavior. See the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `args` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'command' + description: | + Immutable. Specifies the command that runs when the container starts. This overrides the container's [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). Specify this field as an array of executable and arguments, similar to a Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. 
If you do not specify this field, then the container's `ENTRYPOINT` runs, in conjunction with the args field or the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either exists. If this field is not specified and the container does not have an `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). If you specify this field, then you can also specify the `args` field to provide additional arguments for this command. However, if you specify this field, then the container's `CMD` is ignored. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `command` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'ports' + description: | + Immutable. List of ports to expose from the container. Vertex AI sends any prediction requests that it receives to the first port on this list. 
Vertex AI also sends [liveness and health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) to this port. If you do not specify this field, it defaults to following value: ```json [ { "containerPort": 8080 } ] ``` Vertex AI does not use ports other than the first one listed. This field corresponds to the `ports` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'containerPort' + description: | + The number of the port to expose on the pod's IP address. Must be a valid port number, between 1 and 65535 inclusive. + - !ruby/object:Api::Type::String + name: 'healthRoute' + description: | + Immutable. HTTP path on the container to send health checks to. Vertex AI intermittently sends GET requests to this path on the container's IP address and port to check that the container is healthy. Read more about [health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). For example, if you set this field to `/bar`, then Vertex AI intermittently sends a GET request to the `/bar` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/ DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) 
* DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + - !ruby/object:Api::Type::String + name: 'versionId' + description: | + Output only. Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + - !ruby/object:Api::Type::String + name: 'artifactUri' + description: | + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + - !ruby/object:Api::Type::String + name: 'trainingPipeline' + description: | + Output only. The resource name of the TrainingPipeline that uploaded this Model, if any. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::Array + name: 'supportedDeploymentResourcesTypes' + description: | + Output only. When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'supportedOutputStorageFormats' + description: | + Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'versionAliases' + description: | + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'versionCreateTime' + description: | + Output only. Timestamp when this version was created. 
+ - !ruby/object:Api::Type::String + name: 'versionDescription' + description: | + The description of this version. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + - !ruby/object:Api::Resource + name: Featurestore + base_url: '{{parent}}/featurestores' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. + properties: + + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. State of the featurestore. + values: + - :STATE_UNSPECIFIED + - :STABLE + - :UPDATING + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Featurestore was created. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Optional. Used to perform consistent read-modify-write updates. 
If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::Integer + name: 'onlineStorageTtlDays' + description: | + Optional. TTL in days for feature values that will be stored in online serving storage. The Feature Store online storage periodically removes obsolete feature values older than `online_storage_ttl_days` since the feature generation time. Note that `online_storage_ttl_days` should be less than or equal to `offline_storage_ttl_days` for each EntityType under a featurestore. If not set, default to 4000 days + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your Featurestore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Featurestore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Featurestore was last updated. 
+ - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the Featurestore. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` + - !ruby/object:Api::Type::NestedObject + name: 'onlineServingConfig' + description: | + OnlineServingConfig specifies the details for provisioning online serving resources. + properties: + - !ruby/object:Api::Type::Integer + name: 'fixedNodeCount' + description: | + The number of nodes for the online store. The number of nodes doesn't scale automatically, but you can manually update the number of nodes. If set to 0, the featurestore will not have an online store and cannot be used for online serving. + - !ruby/object:Api::Type::NestedObject + name: 'scaling' + description: | + Online serving scaling configuration. If min_node_count and max_node_count are set to the same value, the cluster will be configured with the fixed number of node (no auto-scaling). + properties: + - !ruby/object:Api::Type::Integer + name: 'maxNodeCount' + description: | + The maximum number of nodes to scale up to. Must be greater than min_node_count, and less than or equal to 10 times of 'min_node_count'. + - !ruby/object:Api::Type::Integer + name: 'minNodeCount' + description: | + Required. The minimum number of nodes to scale down to. Must be greater than or equal to 1. + - !ruby/object:Api::Type::Integer + name: 'cpuUtilizationTarget' + description: | + Optional. The cpu utilization that the Autoscaler should be trying to achieve. This number is on a scale from 0 (no utilization) to 100 (total utilization), and is limited between 10 and 80. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set or set to 0, default to 50. 
+ + + + - !ruby/object:Api::Resource + name: FeaturestoreEntityTypeFeature + base_url: '{{parent}}/features' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Feature Metadata information that describes an attribute of an entity type. For example, apple is an entity type, and color is a feature that describes apple. + properties: + + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Feature. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this EntityType was created. + - !ruby/object:Api::Type::Array + name: 'monitoringStatsAnomalies' + description: | + Output only. The list of historical stats and anomalies with specified objectives. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. 
No more than 64 user labels can be associated with one Feature (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. Name of the Feature. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this EntityType was most recently updated. + - !ruby/object:Api::Type::Boolean + name: 'disableMonitoring' + description: | + Optional. If not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If set to true, all types of data monitoring are disabled despite the config on EntityType. + - !ruby/object:Api::Type::Enum + name: 'valueType' + description: | + Required. Immutable. Type of Feature value. 
+ values: + - :VALUE_TYPE_UNSPECIFIED + - :BOOL + - :BOOL_ARRAY + - :DOUBLE + - :DOUBLE_ARRAY + - :INT64 + - :INT64_ARRAY + - :STRING + - :STRING_ARRAY + - :BYTES + + + + - !ruby/object:Api::Resource + name: FeaturestoreEntityTypeFeature + base_url: '{{parent}}/features' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Feature Metadata information that describes an attribute of an entity type. For example, apple is an entity type, and color is a feature that describes apple. + properties: + + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Feature. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this EntityType was created. + - !ruby/object:Api::Type::Array + name: 'monitoringStatsAnomalies' + description: | + Output only. The list of historical stats and anomalies with specified objectives. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. 
International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Feature (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. Name of the Feature. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this EntityType was most recently updated. + - !ruby/object:Api::Type::Boolean + name: 'disableMonitoring' + description: | + Optional. If not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If set to true, all types of data monitoring are disabled despite the config on EntityType. + - !ruby/object:Api::Type::Enum + name: 'valueType' + description: | + Required. Immutable. Type of Feature value. 
+ values: + - :VALUE_TYPE_UNSPECIFIED + - :BOOL + - :BOOL_ARRAY + - :DOUBLE + - :DOUBLE_ARRAY + - :INT64 + - :INT64_ARRAY + - :STRING + - :STRING_ARRAY + - :BYTES + + + + - !ruby/object:Api::Resource + name: TensorboardExperimentRun + base_url: '{{parent}}/runs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + TensorboardRun maps to a specific execution of a training job with a given set of hyperparameter values, model definition, dataset, etc + properties: + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. User provided name of this TensorboardRun. This value must be unique among all TensorboardRuns belonging to the same parent TensorboardExperiment. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this TensorboardRun was last updated. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of this TensorboardRun. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your TensorboardRuns. This field will be used to filter and visualize Runs in the Tensorboard UI. 
For example, a Vertex AI training job can set a label aiplatform.googleapis.com/training_job_id=xxxxx to all the runs created within that job. An end user can set a label experiment_id=xxxxx for all the runs produced in a Jupyter notebook. These runs can be grouped by a label value and visualized together in the Tensorboard UI. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one TensorboardRun (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this TensorboardRun was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the TensorboardRun. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` + + + + - !ruby/object:Api::Resource + name: DatasetDataItemAnnotation + base_url: '{{parent}}/annotations' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem. + properties: + + - !ruby/object:Api::Type::String + name: 'payloadSchemaUri' + description: | + Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Annotation was created. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your Annotations. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Annotation: * "aiplatform.googleapis.com/annotation_set_name": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * "aiplatform.googleapis.com/payload_schema": output only, its value is the payload_schema's title. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Annotation was last updated. + - !ruby/object:Api::Type::String + name: 'payload' + description: | + Required. The schema of the payload can be found in payload_schema. + - !ruby/object:Api::Type::String + name: 'annotationSource' + description: | + Output only. The source of the Annotation. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the Annotation. 
+ + + + - !ruby/object:Api::Resource + name: ModelDeploymentMonitoringJob + base_url: '{{parent}}/modelDeploymentMonitoringJobs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a job that runs periodically to monitor the deployed models in an endpoint. It will analyze the logged training & prediction data to detect any abnormal behaviors. + properties: + + - !ruby/object:Api::Type::Array + name: 'modelDeploymentMonitoringObjectiveConfigs' + description: | + Required. The config for monitoring objectives. This is a per DeployedModel config. Each DeployedModel needs to be configured separately. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'objectiveConfig' + description: | + The objective configuration for model monitoring, including the information needed to detect anomalies for one particular model. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'explanationConfig' + description: | + The config for integrating with Vertex Explainable AI. Only applicable if the Model has explanation_spec populated. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'explanationBaseline' + description: | + Output from BatchPredictionJob for Model Monitoring baseline dataset, which can be used to generate baseline attribution scores. 
+ properties: + - !ruby/object:Api::Type::Enum + name: 'predictionFormat' + description: | + The storage format of the predictions generated BatchPrediction job. + values: + - :PREDICTION_FORMAT_UNSPECIFIED + - :JSONL + - :BIGQUERY + - !ruby/object:Api::Type::NestedObject + name: 'gcs' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::NestedObject + name: 'bigquery' + description: | + The BigQuery location for the output content. + properties: + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. + - !ruby/object:Api::Type::Boolean + name: 'enableFeatureAttributes' + description: | + If want to analyze the Vertex Explainable AI feature attribute scores or not. If set to true, Vertex AI will log the feature attributions from explain response and do the skew/drift detection for them. + - !ruby/object:Api::Type::NestedObject + name: 'trainingDataset' + description: | + Training Dataset information. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'loggingSamplingStrategy' + description: | + Sampling Strategy for logging, can be for both training and prediction dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'randomSampleConfig' + description: | + Requests are randomly selected. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'sampleRate' + description: | + Sample rate (0, 1] + - !ruby/object:Api::Type::String + name: 'dataset' + description: | + The resource name of the Dataset used to train this Model. + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'targetField' + description: | + The target field name the model is to predict. This field will be excluded when doing Predict and (or) Explain for the training data. + - !ruby/object:Api::Type::NestedObject + name: 'bigquerySource' + description: | + The BigQuery location for the input content. + properties: + - !ruby/object:Api::Type::String + name: 'inputUri' + description: | + Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. + - !ruby/object:Api::Type::String + name: 'dataFormat' + description: | + Data format of the dataset, only applicable if the input is from Google Cloud Storage. The possible formats are: "tf-record" The source file is a TFRecord file. "csv" The source file is a CSV file. "jsonl" The source file is a JSONL file. + - !ruby/object:Api::Type::NestedObject + name: 'trainingPredictionSkewDetectionConfig' + description: | + The config for Training & Prediction data skew detection. It specifies the training dataset sources and the skew detection parameters. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'skewThresholds' + description: | + Key is the feature name and value is the threshold. 
If a feature needs to be monitored for skew, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between the training and prediction feature. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. + - !ruby/object:Api::Type::NestedObject + name: 'defaultSkewThreshold' + description: | + The config for feature monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + - !ruby/object:Api::Type::NestedObject + name: 'attributionScoreSkewThresholds' + description: | + Key is the feature name and value is the threshold. The threshold here is against attribution score distance between the training and prediction feature. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. + - !ruby/object:Api::Type::NestedObject + name: 'predictionDriftDetectionConfig' + description: | + The config for Prediction data drift detection. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'attributionScoreDriftThresholds' + description: | + Key is the feature name and value is the threshold. The threshold here is against attribution score distance between different time windows. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. 
+ - !ruby/object:Api::Type::NestedObject + name: 'driftThresholds' + description: | + Key is the feature name and value is the threshold. If a feature needs to be monitored for drift, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between different time windws. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. + - !ruby/object:Api::Type::NestedObject + name: 'defaultDriftThreshold' + description: | + The config for feature monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + - !ruby/object:Api::Type::String + name: 'deployedModelId' + description: | + The DeployedModel ID of the objective config. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your ModelDeploymentMonitoringJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the monitoring job. When the job is still creating, the state will be 'PENDING'. 
Once the job is successfully created, the state will be 'RUNNING'. Pause the job, the state will be 'PAUSED'. Resume the job, the state will return to 'RUNNING'. + values: + - :JOB_STATE_UNSPECIFIED + - :JOB_STATE_QUEUED + - :JOB_STATE_PENDING + - :JOB_STATE_RUNNING + - :JOB_STATE_SUCCEEDED + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLING + - :JOB_STATE_CANCELLED + - :JOB_STATE_PAUSED + - :JOB_STATE_EXPIRED + - :JOB_STATE_UPDATING + - :JOB_STATE_PARTIALLY_SUCCEEDED + - !ruby/object:Api::Type::String + name: 'analysisInstanceSchemaUri' + description: | + YAML schema file uri describing the format of a single instance that you want Tensorflow Data Validation (TFDV) to analyze. If this field is empty, all the feature data types are inferred from predict_instance_schema_uri, meaning that TFDV will use the data in the exact format(data type) as prediction request/response. If there are any data type differences between predict instance and TFDV instance, this field can be used to override the schema. For models trained with Vertex AI, this field must be set as all the fields in predict instance formatted as string. + - !ruby/object:Api::Type::Boolean + name: 'enableMonitoringPipelineLogs' + description: | + If true, the scheduled monitoring pipeline logs are sent to Google Cloud Logging, including pipeline status and anomalies detected. Please note the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging#pricing). + - !ruby/object:Api::Type::String + name: 'endpoint' + description: | + Required. Endpoint resource name. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` + - !ruby/object:Api::Type::NestedObject + name: 'loggingSamplingStrategy' + description: | + Sampling Strategy for logging, can be for both training and prediction dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'randomSampleConfig' + description: | + Requests are randomly selected. 
+ properties: + - !ruby/object:Api::Type::Integer + name: 'sampleRate' + description: | + Sample rate (0, 1] + - !ruby/object:Api::Type::Array + name: 'bigqueryTables' + description: | + Output only. The created bigquery tables for the job under customer project. Customer could do their own query & analysis. There could be 4 log tables in maximum: 1. Training data logging predict request/response 2. Serving data logging predict request/response + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'logSource' + description: | + The source of log. + values: + - :LOG_SOURCE_UNSPECIFIED + - :TRAINING + - :SERVING + - !ruby/object:Api::Type::String + name: 'bigqueryTablePath' + description: | + The created BigQuery table to store logs. Customer could do their own query & analysis. Format: `bq://.model_deployment_monitoring_._` + - !ruby/object:Api::Type::Enum + name: 'logType' + description: | + The type of log. + values: + - :LOG_TYPE_UNSPECIFIED + - :PREDICT + - :EXPLAIN + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of the ModelDeploymentMonitoringJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a ModelDeploymentMonitoringJob. + - !ruby/object:Api::Type::Enum + name: 'scheduleState' + description: | + Output only. Schedule state when the monitoring job is in Running state. + values: + - :MONITORING_SCHEDULE_STATE_UNSPECIFIED + - :PENDING + - :OFFLINE + - :RUNNING + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'modelMonitoringAlertConfig' + description: | + + properties: + - !ruby/object:Api::Type::NestedObject + name: 'emailAlertConfig' + description: | + The config for email alert. + properties: + - !ruby/object:Api::Type::Array + name: 'userEmails' + description: | + The email addresses to send the alert. + item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'enableLogging' + description: | + Dump the anomalies to Cloud Logging. The anomalies will be put to json payload encoded from proto google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry. This can be further sinked to Pub/Sub or any other services supported by Cloud Logging. + - !ruby/object:Api::Type::NestedObject + name: 'latestMonitoringPipelineMetadata' + description: | + All metadata of most recent monitoring pipelines. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'status' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). 
Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'runTime' + description: | + The time that most recent monitoring pipelines that is related to this run. + - !ruby/object:Api::Type::String + name: 'samplePredictInstance' + description: | + Sample Predict instance, same format as PredictRequest.instances, this can be set as a replacement of ModelDeploymentMonitoringJob.predict_instance_schema_uri. If not set, we will generate predict schema from collected predict requests. + - !ruby/object:Api::Type::String + name: 'predictInstanceSchemaUri' + description: | + YAML schema file uri describing the format of a single instance, which are given to format this Endpoint's prediction (and explanation). If not set, we will generate predict schema from collected predict requests. + - !ruby/object:Api::Type::String + name: 'nextScheduleTime' + description: | + Output only. Timestamp when this monitoring pipeline will be scheduled to run for the next round. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this ModelDeploymentMonitoringJob was created. 
+ - !ruby/object:Api::Type::String + name: 'logTtl' + description: | + The TTL of BigQuery tables in user projects which stores logs. A day is the basic unit of the TTL and we take the ceil of TTL/86400(a day). e.g. { second: 3600} indicates ttl = 1 day. + - !ruby/object:Api::Type::NestedObject + name: 'statsAnomaliesBaseDirectory' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this ModelDeploymentMonitoringJob was updated most recently. + - !ruby/object:Api::Type::NestedObject + name: 'modelDeploymentMonitoringScheduleConfig' + description: | + The config for scheduling monitoring job. + properties: + - !ruby/object:Api::Type::String + name: 'monitorWindow' + description: | + The time window of the prediction data being included in each prediction dataset. This window specifies how long the data should be collected from historical model results for each run. If not set, ModelDeploymentMonitoringScheduleConfig.monitor_interval will be used. e.g. If currently the cutoff time is 2022-01-08 14:30:00 and the monitor_window is set to be 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 14:30:00 will be retrieved and aggregated to calculate the monitoring statistics. + - !ruby/object:Api::Type::String + name: 'monitorInterval' + description: | + Required. The model monitoring job scheduling interval. It will be rounded up to next full hour. This defines how often the monitoring jobs are triggered. 
+ - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of a ModelDeploymentMonitoringJob. + + + + - !ruby/object:Api::Resource + name: ModelDeploymentMonitoringJob + base_url: '{{parent}}/modelDeploymentMonitoringJobs' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a job that runs periodically to monitor the deployed models in an endpoint. It will analyze the logged training & prediction data to detect any abnormal behaviors. + properties: + + - !ruby/object:Api::Type::Array + name: 'modelDeploymentMonitoringObjectiveConfigs' + description: | + Required. The config for monitoring objectives. This is a per DeployedModel config. Each DeployedModel needs to be configured separately. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'objectiveConfig' + description: | + The objective configuration for model monitoring, including the information needed to detect anomalies for one particular model. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'explanationConfig' + description: | + The config for integrating with Vertex Explainable AI. Only applicable if the Model has explanation_spec populated. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'explanationBaseline' + description: | + Output from BatchPredictionJob for Model Monitoring baseline dataset, which can be used to generate baseline attribution scores. + properties: + - !ruby/object:Api::Type::Enum + name: 'predictionFormat' + description: | + The storage format of the predictions generated BatchPrediction job. + values: + - :PREDICTION_FORMAT_UNSPECIFIED + - :JSONL + - :BIGQUERY + - !ruby/object:Api::Type::NestedObject + name: 'gcs' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::NestedObject + name: 'bigquery' + description: | + The BigQuery location for the output content. + properties: + - !ruby/object:Api::Type::String + name: 'outputUri' + description: | + Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. 
+ - !ruby/object:Api::Type::Boolean + name: 'enableFeatureAttributes' + description: | + If want to analyze the Vertex Explainable AI feature attribute scores or not. If set to true, Vertex AI will log the feature attributions from explain response and do the skew/drift detection for them. + - !ruby/object:Api::Type::NestedObject + name: 'trainingDataset' + description: | + Training Dataset information. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'loggingSamplingStrategy' + description: | + Sampling Strategy for logging, can be for both training and prediction dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'randomSampleConfig' + description: | + Requests are randomly selected. + properties: + - !ruby/object:Api::Type::Integer + name: 'sampleRate' + description: | + Sample rate (0, 1] + - !ruby/object:Api::Type::String + name: 'dataset' + description: | + The resource name of the Dataset used to train this Model. + - !ruby/object:Api::Type::NestedObject + name: 'gcsSource' + description: | + The Google Cloud Storage location for the input content. + properties: + - !ruby/object:Api::Type::Array + name: 'uris' + description: | + Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'targetField' + description: | + The target field name the model is to predict. This field will be excluded when doing Predict and (or) Explain for the training data. + - !ruby/object:Api::Type::NestedObject + name: 'bigquerySource' + description: | + The BigQuery location for the input content. + properties: + - !ruby/object:Api::Type::String + name: 'inputUri' + description: | + Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. 
+ - !ruby/object:Api::Type::String + name: 'dataFormat' + description: | + Data format of the dataset, only applicable if the input is from Google Cloud Storage. The possible formats are: "tf-record" The source file is a TFRecord file. "csv" The source file is a CSV file. "jsonl" The source file is a JSONL file. + - !ruby/object:Api::Type::NestedObject + name: 'trainingPredictionSkewDetectionConfig' + description: | + The config for Training & Prediction data skew detection. It specifies the training dataset sources and the skew detection parameters. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'skewThresholds' + description: | + Key is the feature name and value is the threshold. If a feature needs to be monitored for skew, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between the training and prediction feature. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. + - !ruby/object:Api::Type::NestedObject + name: 'defaultSkewThreshold' + description: | + The config for feature monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + - !ruby/object:Api::Type::NestedObject + name: 'attributionScoreSkewThresholds' + description: | + Key is the feature name and value is the threshold. The threshold here is against attribution score distance between the training and prediction feature. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. + - !ruby/object:Api::Type::NestedObject + name: 'predictionDriftDetectionConfig' + description: | + The config for Prediction data drift detection. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'attributionScoreDriftThresholds' + description: | + Key is the feature name and value is the threshold. The threshold here is against attribution score distance between different time windows. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. + - !ruby/object:Api::Type::NestedObject + name: 'driftThresholds' + description: | + Key is the feature name and value is the threshold. If a feature needs to be monitored for drift, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between different time windws. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + The config for feature monitoring threshold. + - !ruby/object:Api::Type::NestedObject + name: 'defaultDriftThreshold' + description: | + The config for feature monitoring threshold. + properties: + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + - !ruby/object:Api::Type::String + name: 'deployedModelId' + description: | + The DeployedModel ID of the objective config. 
+ - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your ModelDeploymentMonitoringJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the monitoring job. When the job is still creating, the state will be 'PENDING'. Once the job is successfully created, the state will be 'RUNNING'. Pause the job, the state will be 'PAUSED'. Resume the job, the state will return to 'RUNNING'. + values: + - :JOB_STATE_UNSPECIFIED + - :JOB_STATE_QUEUED + - :JOB_STATE_PENDING + - :JOB_STATE_RUNNING + - :JOB_STATE_SUCCEEDED + - :JOB_STATE_FAILED + - :JOB_STATE_CANCELLING + - :JOB_STATE_CANCELLED + - :JOB_STATE_PAUSED + - :JOB_STATE_EXPIRED + - :JOB_STATE_UPDATING + - :JOB_STATE_PARTIALLY_SUCCEEDED + - !ruby/object:Api::Type::String + name: 'analysisInstanceSchemaUri' + description: | + YAML schema file uri describing the format of a single instance that you want Tensorflow Data Validation (TFDV) to analyze. If this field is empty, all the feature data types are inferred from predict_instance_schema_uri, meaning that TFDV will use the data in the exact format(data type) as prediction request/response. If there are any data type differences between predict instance and TFDV instance, this field can be used to override the schema. For models trained with Vertex AI, this field must be set as all the fields in predict instance formatted as string. 
+ - !ruby/object:Api::Type::Boolean + name: 'enableMonitoringPipelineLogs' + description: | + If true, the scheduled monitoring pipeline logs are sent to Google Cloud Logging, including pipeline status and anomalies detected. Please note the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging#pricing). + - !ruby/object:Api::Type::String + name: 'endpoint' + description: | + Required. Endpoint resource name. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` + - !ruby/object:Api::Type::NestedObject + name: 'loggingSamplingStrategy' + description: | + Sampling Strategy for logging, can be for both training and prediction dataset. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'randomSampleConfig' + description: | + Requests are randomly selected. + properties: + - !ruby/object:Api::Type::Integer + name: 'sampleRate' + description: | + Sample rate (0, 1] + - !ruby/object:Api::Type::Array + name: 'bigqueryTables' + description: | + Output only. The created bigquery tables for the job under customer project. Customer could do their own query & analysis. There could be 4 log tables in maximum: 1. Training data logging predict request/response 2. Serving data logging predict request/response + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'logSource' + description: | + The source of log. + values: + - :LOG_SOURCE_UNSPECIFIED + - :TRAINING + - :SERVING + - !ruby/object:Api::Type::String + name: 'bigqueryTablePath' + description: | + The created BigQuery table to store logs. Customer could do their own query & analysis. Format: `bq://.model_deployment_monitoring_._` + - !ruby/object:Api::Type::Enum + name: 'logType' + description: | + The type of log. + values: + - :LOG_TYPE_UNSPECIFIED + - :PREDICT + - :EXPLAIN + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. 
The user-defined name of the ModelDeploymentMonitoringJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a ModelDeploymentMonitoringJob. + - !ruby/object:Api::Type::Enum + name: 'scheduleState' + description: | + Output only. Schedule state when the monitoring job is in Running state. + values: + - :MONITORING_SCHEDULE_STATE_UNSPECIFIED + - :PENDING + - :OFFLINE + - :RUNNING + - !ruby/object:Api::Type::NestedObject + name: 'error' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::NestedObject + name: 'modelMonitoringAlertConfig' + description: | + + properties: + - !ruby/object:Api::Type::NestedObject + name: 'emailAlertConfig' + description: | + The config for email alert. + properties: + - !ruby/object:Api::Type::Array + name: 'userEmails' + description: | + The email addresses to send the alert. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Boolean + name: 'enableLogging' + description: | + Dump the anomalies to Cloud Logging. The anomalies will be put to json payload encoded from proto google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry. This can be further sinked to Pub/Sub or any other services supported by Cloud Logging. + - !ruby/object:Api::Type::NestedObject + name: 'latestMonitoringPipelineMetadata' + description: | + All metadata of most recent monitoring pipelines. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'status' + description: | + The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + properties: + - !ruby/object:Api::Type::Array + name: 'details' + description: | + A list of messages that carry the error details. There is a common set of message types for APIs to use. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'code' + description: | + The status code, which should be an enum value of google.rpc.Code. + - !ruby/object:Api::Type::String + name: 'message' + description: | + A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + - !ruby/object:Api::Type::String + name: 'runTime' + description: | + The time that most recent monitoring pipelines that is related to this run. 
+ - !ruby/object:Api::Type::String + name: 'samplePredictInstance' + description: | + Sample Predict instance, same format as PredictRequest.instances, this can be set as a replacement of ModelDeploymentMonitoringJob.predict_instance_schema_uri. If not set, we will generate predict schema from collected predict requests. + - !ruby/object:Api::Type::String + name: 'predictInstanceSchemaUri' + description: | + YAML schema file uri describing the format of a single instance, which are given to format this Endpoint's prediction (and explanation). If not set, we will generate predict schema from collected predict requests. + - !ruby/object:Api::Type::String + name: 'nextScheduleTime' + description: | + Output only. Timestamp when this monitoring pipeline will be scheduled to run for the next round. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this ModelDeploymentMonitoringJob was created. + - !ruby/object:Api::Type::String + name: 'logTtl' + description: | + The TTL of BigQuery tables in user projects which stores logs. A day is the basic unit of the TTL and we take the ceil of TTL/86400(a day). e.g. { second: 3600} indicates ttl = 1 day. + - !ruby/object:Api::Type::NestedObject + name: 'statsAnomaliesBaseDirectory' + description: | + The Google Cloud Storage location where the output is to be written to. + properties: + - !ruby/object:Api::Type::String + name: 'outputUriPrefix' + description: | + Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this ModelDeploymentMonitoringJob was updated most recently. + - !ruby/object:Api::Type::NestedObject + name: 'modelDeploymentMonitoringScheduleConfig' + description: | + The config for scheduling monitoring job. 
+ properties: + - !ruby/object:Api::Type::String + name: 'monitorWindow' + description: | + The time window of the prediction data being included in each prediction dataset. This window specifies how long the data should be collected from historical model results for each run. If not set, ModelDeploymentMonitoringScheduleConfig.monitor_interval will be used. e.g. If currently the cutoff time is 2022-01-08 14:30:00 and the monitor_window is set to be 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 14:30:00 will be retrieved and aggregated to calculate the monitoring statistics. + - !ruby/object:Api::Type::String + name: 'monitorInterval' + description: | + Required. The model monitoring job scheduling interval. It will be rounded up to next full hour. This defines how often the monitoring jobs are triggered. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of a ModelDeploymentMonitoringJob. 
+ + + + - !ruby/object:Api::Resource + name: MetadataStoresMetadataSchema + base_url: '{{parent}}/metadataSchemas' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general MetadataSchema. + properties: + + - !ruby/object:Api::Type::Enum + name: 'schemaType' + description: | + The type of the MetadataSchema. This is a property that identifies which metadata types will use the MetadataSchema. + values: + - :METADATA_SCHEMA_TYPE_UNSPECIFIED + - :ARTIFACT_TYPE + - :EXECUTION_TYPE + - :CONTEXT_TYPE + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Metadata Schema + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the MetadataSchema. The version's format must match the following regular expression: `^[0-9]+.+.+$`, which would allow to order/compare different versions. Example: 1.0.0, 1.0.1, etc. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the MetadataSchema. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this MetadataSchema was created. + - !ruby/object:Api::Type::String + name: 'schema' + description: | + Required. The raw YAML string representation of the MetadataSchema. 
The combination of [MetadataSchema.version] and the schema name given by `title` in [MetadataSchema.schema] must be unique within a MetadataStore. The schema is defined as an OpenAPI 3.0.2 [MetadataSchema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject) + + + + - !ruby/object:Api::Resource + name: MetadataStoresMetadataSchema + base_url: '{{parent}}/metadataSchemas' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general MetadataSchema. + properties: + + - !ruby/object:Api::Type::Enum + name: 'schemaType' + description: | + The type of the MetadataSchema. This is a property that identifies which metadata types will use the MetadataSchema. + values: + - :METADATA_SCHEMA_TYPE_UNSPECIFIED + - :ARTIFACT_TYPE + - :EXECUTION_TYPE + - :CONTEXT_TYPE + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Metadata Schema + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the MetadataSchema. The version's format must match the following regular expression: `^[0-9]+.+.+$`, which would allow to order/compare different versions. Example: 1.0.0, 1.0.1, etc. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the MetadataSchema. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. 
Timestamp when this MetadataSchema was created. + - !ruby/object:Api::Type::String + name: 'schema' + description: | + Required. The raw YAML string representation of the MetadataSchema. The combination of [MetadataSchema.version] and the schema name given by `title` in [MetadataSchema.schema] must be unique within a MetadataStore. The schema is defined as an OpenAPI 3.0.2 [MetadataSchema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject) + + + + - !ruby/object:Api::Resource + name: MetadataStoresExecution + base_url: '{{parent}}/executions' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general execution. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Execution was created. 
+ - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. + values: + - :STATE_UNSPECIFIED + - :NEW + - :RUNNING + - :COMPLETE + - :FAILED + - :CACHED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Execution. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Execution. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Execution + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Execution was last updated. + + + + - !ruby/object:Api::Resource + name: MetadataStoresExecution + base_url: '{{parent}}/executions' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general execution. + properties: + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Execution was created. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. 
+ - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. + values: + - :STATE_UNSPECIFIED + - :NEW + - :RUNNING + - :COMPLETE + - :FAILED + - :CACHED + - :CANCELLED + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Execution. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Execution. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Execution + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Execution was last updated. 
+ + + + - !ruby/object:Api::Resource + name: MetadataStoresContext + base_url: '{{parent}}/contexts' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general context. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the Context. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Context + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Context. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. 
+ - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Context was created. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Context was last updated. + - !ruby/object:Api::Type::Array + name: 'parentContexts' + description: | + Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. 
+ item_type: Api::Type::String + + + + - !ruby/object:Api::Resource + name: MetadataStoresContext + base_url: '{{parent}}/contexts' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general context. + properties: + + - !ruby/object:Api::Type::String + name: 'name' + description: | + Immutable. The resource name of the Context. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Context + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Context. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. 
+ - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Context was created. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Context was last updated. + - !ruby/object:Api::Type::Array + name: 'parentContexts' + description: | + Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. 
+ item_type: Api::Type::String + + + + - !ruby/object:Api::Resource + name: MetadataStoresArtifact + base_url: '{{parent}}/artifacts' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general artifact. + properties: + + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Artifact. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Artifact. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Artifact was last updated. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. 
This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :LIVE + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'uri' + description: | + The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Artifact was created. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Artifact + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded). 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + - !ruby/object:Api::Resource + name: MetadataStoresArtifact + base_url: '{{parent}}/artifacts' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a general artifact. + properties: + + - !ruby/object:Api::Type::String + name: 'schemaVersion' + description: | + The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User provided display name of the Artifact. May be up to 128 Unicode characters. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Artifact. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Artifact was last updated. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. 
This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions. + values: + - :STATE_UNSPECIFIED + - :PENDING + - :LIVE + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Properties of the object. + - !ruby/object:Api::Type::String + name: 'uri' + description: | + The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Artifact was created. + - !ruby/object:Api::Type::String + name: 'schemaTitle' + description: | + The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the Artifact + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded). 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Resource + name: DatasetsSavedQuery + base_url: '{{parent}}/savedQueries' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A SavedQuery is a view of the dataset. It references a subset of annotations by problem type and filters. + properties: + + - !ruby/object:Api::Type::Integer + name: 'annotationSpecCount' + description: | + Output only. Number of AnnotationSpecs in the context of the SavedQuery. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when SavedQuery was last updated. + - !ruby/object:Api::Type::Boolean + name: 'supportAutomlTraining' + description: | + Output only. If the Annotations belonging to the SavedQuery can be used for AutoML training. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Some additional information about the SavedQuery. + - !ruby/object:Api::Type::String + name: 'problemType' + description: | + Required. Problem type of the SavedQuery. Allowed values: * IMAGE_CLASSIFICATION_SINGLE_LABEL * IMAGE_CLASSIFICATION_MULTI_LABEL * IMAGE_BOUNDING_POLY * IMAGE_BOUNDING_BOX * TEXT_CLASSIFICATION_SINGLE_LABEL * TEXT_CLASSIFICATION_MULTI_LABEL * TEXT_EXTRACTION * TEXT_SENTIMENT * VIDEO_CLASSIFICATION * VIDEO_OBJECT_TRACKING + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. 
Resource name of the SavedQuery. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this SavedQuery was created. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write update. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of the SavedQuery. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'annotationFilter' + description: | + Output only. Filters on the Annotations in the dataset. + + + + - !ruby/object:Api::Resource + name: DatasetsSavedQuery + base_url: '{{parent}}/savedQueries' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A SavedQuery is a view of the dataset. It references a subset of annotations by problem type and filters. + properties: + + - !ruby/object:Api::Type::Integer + name: 'annotationSpecCount' + description: | + Output only. Number of AnnotationSpecs in the context of the SavedQuery. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when SavedQuery was last updated. + - !ruby/object:Api::Type::Boolean + name: 'supportAutomlTraining' + description: | + Output only. If the Annotations belonging to the SavedQuery can be used for AutoML training. 
+ - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Some additional information about the SavedQuery. + - !ruby/object:Api::Type::String + name: 'problemType' + description: | + Required. Problem type of the SavedQuery. Allowed values: * IMAGE_CLASSIFICATION_SINGLE_LABEL * IMAGE_CLASSIFICATION_MULTI_LABEL * IMAGE_BOUNDING_POLY * IMAGE_BOUNDING_BOX * TEXT_CLASSIFICATION_SINGLE_LABEL * TEXT_CLASSIFICATION_MULTI_LABEL * TEXT_EXTRACTION * TEXT_SENTIMENT * VIDEO_CLASSIFICATION * VIDEO_OBJECT_TRACKING + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the SavedQuery. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this SavedQuery was created. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write update. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of the SavedQuery. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'annotationFilter' + description: | + Output only. Filters on the Annotations in the dataset. 
+ + + + - !ruby/object:Api::Resource + name: DatasetsAnnotationSpec + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Identifies a concept with which DataItems may be annotated with. + properties: + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the AnnotationSpec. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this AnnotationSpec was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when AnnotationSpec was last updated. 
+ + + + - !ruby/object:Api::Resource + name: NasJobsNasTrialDetail + base_url: '{{parent}}/nasTrialDetails' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Represents a NasTrial details along with its parameters. If there is a corresponding train NasTrial, the train NasTrial is also returned. + properties: + + - !ruby/object:Api::Type::String + name: 'parameters' + description: | + The parameters for the NasJob NasTrial. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Resource name of the NasTrialDetail. + - !ruby/object:Api::Type::NestedObject + name: 'searchTrial' + description: | + Represents a uCAIP NasJob trial. + properties: + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. The identifier of the NasTrial assigned by the service. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the NasTrial. + values: + - :STATE_UNSPECIFIED + - :REQUESTED + - :ACTIVE + - :STOPPING + - :SUCCEEDED + - :INFEASIBLE + - !ruby/object:Api::Type::NestedObject + name: 'finalMeasurement' + description: | + A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. 
A list of metrics got by evaluating the objective functions using suggested Parameter values. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the NasTrial was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the NasTrial's status changed to `SUCCEEDED` or `INFEASIBLE`. + - !ruby/object:Api::Type::NestedObject + name: 'trainTrial' + description: | + Represents a uCAIP NasJob trial. + properties: + - !ruby/object:Api::Type::String + name: 'id' + description: | + Output only. The identifier of the NasTrial assigned by the service. + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + Output only. The detailed state of the NasTrial. + values: + - :STATE_UNSPECIFIED + - :REQUESTED + - :ACTIVE + - :STOPPING + - :SUCCEEDED + - :INFEASIBLE + - !ruby/object:Api::Type::NestedObject + name: 'finalMeasurement' + description: | + A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. + properties: + - !ruby/object:Api::Type::Array + name: 'metrics' + description: | + Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'metricId' + description: | + Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + - !ruby/object:Api::Type::Integer + name: 'value' + description: | + Output only. The value for this metric. + - !ruby/object:Api::Type::String + name: 'elapsedDuration' + description: | + Output only. Time that the Trial has been running at the point of this Measurement. + - !ruby/object:Api::Type::String + name: 'stepCount' + description: | + Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + - !ruby/object:Api::Type::String + name: 'startTime' + description: | + Output only. Time when the NasTrial was started. + - !ruby/object:Api::Type::String + name: 'endTime' + description: | + Output only. Time when the NasTrial's status changed to `SUCCEEDED` or `INFEASIBLE`. + + + + - !ruby/object:Api::Resource + name: MetadataStore + base_url: '{{parent}}/metadataStores' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Instance of a metadata store. Contains a set of metadata that can be queried. + properties: + + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the MetadataStore. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. 
Timestamp when this MetadataStore was created. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this MetadataStore was last updated. + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + description: | + Represents a customer-managed encryption key spec that can be applied to a top-level resource. + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + - !ruby/object:Api::Type::NestedObject + name: 'state' + description: | + Represents state information for a MetadataStore. + properties: + - !ruby/object:Api::Type::String + name: 'diskUtilizationBytes' + description: | + The disk utilization of the MetadataStore in bytes. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the MetadataStore instance. + + + + - !ruby/object:Api::Resource + name: Endpoint + base_url: '{{parent}}/endpoints' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. 
+ properties: + + - !ruby/object:Api::Type::String + name: 'encryptionSpec' + description: | + Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Endpoint was last updated. + - !ruby/object:Api::Type::String + name: 'modelDeploymentMonitoringJob' + description: | + Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Endpoint. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. 
Where `{project}` is a project number, as in `12345`, and `{network}` is network name. + - !ruby/object:Api::Type::NestedObject + name: 'trafficSplit' + description: | + A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'predictRequestResponseLoggingConfig' + description: | + Configures the request-response logging for online prediction. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Endpoint was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Endpoint. 
+ + + + - !ruby/object:Api::Resource + name: Endpoint + base_url: '{{parent}}/endpoints' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + properties: + + - !ruby/object:Api::Type::String + name: 'encryptionSpec' + description: | + Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Endpoint was last updated. + - !ruby/object:Api::Type::String + name: 'modelDeploymentMonitoringJob' + description: | + Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Endpoint. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. 
The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. + - !ruby/object:Api::Type::NestedObject + name: 'trafficSplit' + description: | + A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. 
The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'predictRequestResponseLoggingConfig' + description: | + Configures the request-response logging for online prediction. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Endpoint was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Endpoint. + + + + - !ruby/object:Api::Resource + name: Endpoint + base_url: '{{parent}}/endpoints' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + properties: + + - !ruby/object:Api::Type::String + name: 'encryptionSpec' + description: | + Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. 
+ - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Endpoint was last updated. + - !ruby/object:Api::Type::String + name: 'modelDeploymentMonitoringJob' + description: | + Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Endpoint. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. + - !ruby/object:Api::Type::NestedObject + name: 'trafficSplit' + description: | + A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'predictRequestResponseLoggingConfig' + description: | + Configures the request-response logging for online prediction. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Endpoint was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Endpoint. 
+ + + + - !ruby/object:Api::Resource + name: Endpoint + base_url: '{{parent}}/endpoints' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + properties: + + - !ruby/object:Api::Type::String + name: 'encryptionSpec' + description: | + Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. + - !ruby/object:Api::Type::Boolean + name: 'enablePrivateServiceConnect' + description: | + Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this Endpoint was last updated. + - !ruby/object:Api::Type::String + name: 'modelDeploymentMonitoringJob' + description: | + Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` + - !ruby/object:Api::Type::String + name: 'description' + description: | + The description of the Endpoint. + - !ruby/object:Api::Type::Array + name: 'deployedModels' + description: | + Output only. 
The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'network' + description: | + Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. + - !ruby/object:Api::Type::NestedObject + name: 'trafficSplit' + description: | + A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. 
The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + - !ruby/object:Api::Type::String + name: 'predictRequestResponseLoggingConfig' + description: | + Configures the request-response logging for online prediction. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this Endpoint was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the Endpoint. + + + + - !ruby/object:Api::Resource + name: ModelEvaluationSlice + base_url: '{{parent}}/slices' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A collection of metrics calculated by comparing Model's predictions on a slice of the test data against ground truth annotations. + properties: + + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this ModelEvaluationSlice was created. + - !ruby/object:Api::Type::NestedObject + name: 'modelExplanation' + description: | + Aggregated explanation metrics for a Model over a set of instances. + properties: + - !ruby/object:Api::Type::Array + name: 'meanAttributions' + description: | + Output only. 
Aggregated attributions explaining the Model's prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'outputDisplayName' + description: | + Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index. + - !ruby/object:Api::Type::Integer + name: 'baselineOutputValue' + description: | + Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged. + - !ruby/object:Api::Type::String + name: 'outputName' + description: | + Output only. Name of the explain output. 
Specified as the key in ExplanationMetadata.outputs. + - !ruby/object:Api::Type::String + name: 'featureAttributions' + description: | + Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated). + - !ruby/object:Api::Type::Array + name: 'outputIndex' + description: | + Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0. + item_type: Api::Type::String + - !ruby/object:Api::Type::Integer + name: 'instanceOutputValue' + description: | + Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. 
If the Model predicted output has multiple dimensions, this is the value in the output located by output_index. + - !ruby/object:Api::Type::Integer + name: 'approximationError' + description: | + Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the ModelEvaluationSlice. + - !ruby/object:Api::Type::String + name: 'metrics' + description: | + Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri + - !ruby/object:Api::Type::NestedObject + name: 'slice' + description: | + Definition of a slice. + properties: + - !ruby/object:Api::Type::String + name: 'value' + description: | + Output only. The value of the dimension in this slice. + - !ruby/object:Api::Type::String + name: 'dimension' + description: | + Output only. The dimension of the slice. Well-known dimensions are: * `annotationSpec`: This slice is on the test data that has either ground truth or prediction with AnnotationSpec.display_name equals to value. * `slice`: This slice is a user customized slice defined by its SliceSpec. + - !ruby/object:Api::Type::NestedObject + name: 'sliceSpec' + description: | + Specification for how the data should be sliced. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'configs' + description: | + Mapping configuration for this SliceSpec. The key is the name of the feature. By default, the key will be prefixed by "instance" as a dictionary prefix for Vertex Batch Predictions output format. 
+ properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + Specification message containing the config for this SliceSpec. When `kind` is selected as `value` and/or `range`, only a single slice will be computed. When `all_values` is present, a separate slice will be computed for each possible label/value for the corresponding key in `config`. Examples, with feature zip_code with values 12345, 23334, 88888 and feature country with values "US", "Canada", "Mexico" in the dataset: Example 1: { "zip_code": { "value": { "float_value": 12345.0 } } } A single slice for any data with zip_code 12345 in the dataset. Example 2: { "zip_code": { "range": { "low": 12345, "high": 20000 } } } A single slice containing data where the zip_codes between 12345 and 20000 For this example, data with the zip_code of 12345 will be in this slice. Example 3: { "zip_code": { "range": { "low": 10000, "high": 20000 } }, "country": { "value": { "string_value": "US" } } } A single slice containing data where the zip_codes between 10000 and 20000 has the country "US". For this example, data with the zip_code of 12345 and country "US" will be in this slice. Example 4: { "country": {"all_values": { "value": true } } } Three slices are computed, one for each unique country in the dataset. Example 5: { "country": { "all_values": { "value": true } }, "zip_code": { "value": { "float_value": 12345.0 } } } Three slices are computed, one for each unique country in the dataset where the zip_code is also 12345. For this example, data with zip_code 12345 and country "US" will be in one slice, zip_code 12345 and country "Canada" in another slice, and zip_code 12345 and country "Mexico" in another slice, totaling 3 slices. + - !ruby/object:Api::Type::String + name: 'metricsSchemaUri' + description: | + Output only. Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluationSlice. 
The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). + + + + - !ruby/object:Api::Resource + name: DatasetsDataItem + base_url: '{{parent}}/dataItems' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A piece of data in a Dataset. Could be an image, a video, a document or plain text. + properties: + + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this DataItem was last updated. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the DataItem. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this DataItem was created. + - !ruby/object:Api::Type::String + name: 'payload' + description: | + Required. The data that the DataItem represents (for example, an image or a text snippet). The schema of the payload is stored in the parent Dataset's metadata schema's dataItemSchemaUri field. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your DataItems. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one DataItem(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + - !ruby/object:Api::Resource + name: DatasetsDataItem + base_url: '{{parent}}/dataItems' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + A piece of data in a Dataset. Could be an image, a video, a document or plain text. + properties: + + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this DataItem was last updated. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. The resource name of the DataItem. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this DataItem was created. + - !ruby/object:Api::Type::String + name: 'payload' + description: | + Required. 
The data that the DataItem represents (for example, an image or a text snippet). The schema of the payload is stored in the parent Dataset's metadata schema's dataItemSchemaUri field. + - !ruby/object:Api::Type::NestedObject + name: 'labels' + description: | + Optional. The labels with user-defined metadata to organize your DataItems. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one DataItem(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + properties: + - !ruby/object:Api::Type::String + name: 'additionalProperties' + description: | + + + + + - !ruby/object:Api::Resource + name: TensorboardExperimentRunTimeSeriesResource + base_url: '{{parent}}/timeSeries' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + TensorboardTimeSeries maps to times series produced in training runs + properties: + + - !ruby/object:Api::Type::String + name: 'pluginName' + description: | + Immutable. Name of the plugin this time series pertain to. 
Such as Scalar, Tensor, Blob + - !ruby/object:Api::Type::String + name: 'pluginData' + description: | + Data of the current plugin, with the size limited to 65KB. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of this TensorboardTimeSeries. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. User provided name of this TensorboardTimeSeries. This value should be unique among all TensorboardTimeSeries resources belonging to the same TensorboardRun resource (parent resource). + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this TensorboardTimeSeries was last updated. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. Timestamp when this TensorboardTimeSeries was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the TensorboardTimeSeries. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Output only. Scalar, Tensor, or Blob metadata for this TensorboardTimeSeries. + - !ruby/object:Api::Type::Enum + name: 'valueType' + description: | + Required. Immutable. Type of TensorboardTimeSeries value. 
+ values: + - :VALUE_TYPE_UNSPECIFIED + - :SCALAR + - :TENSOR + - :BLOB_SEQUENCE + + + + - !ruby/object:Api::Resource + name: TensorboardExperimentRunTimeSeriesResource + base_url: '{{parent}}/timeSeries' + self_link: '{{name}}' + references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': + api: 'https://cloud.google.com/vertex-ai/docs' + async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{op_id}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: True + allowed: + - True + - False + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' + description: |- + TensorboardTimeSeries maps to times series produced in training runs + properties: + + - !ruby/object:Api::Type::String + name: 'pluginName' + description: | + Immutable. Name of the plugin this time series pertain to. Such as Scalar, Tensor, Blob + - !ruby/object:Api::Type::String + name: 'pluginData' + description: | + Data of the current plugin, with the size limited to 65KB. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of this TensorboardTimeSeries. + - !ruby/object:Api::Type::String + name: 'etag' + description: | + Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + Required. User provided name of this TensorboardTimeSeries. This value should be unique among all TensorboardTimeSeries resources belonging to the same TensorboardRun resource (parent resource). + - !ruby/object:Api::Type::String + name: 'updateTime' + description: | + Output only. Timestamp when this TensorboardTimeSeries was last updated. + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Output only. 
Timestamp when this TensorboardTimeSeries was created. + - !ruby/object:Api::Type::String + name: 'name' + description: | + Output only. Name of the TensorboardTimeSeries. + - !ruby/object:Api::Type::String + name: 'metadata' + description: | + Output only. Scalar, Tensor, or Blob metadata for this TensorboardTimeSeries. + - !ruby/object:Api::Type::Enum + name: 'valueType' + description: | + Required. Immutable. Type of TensorboardTimeSeries value. + values: + - :VALUE_TYPE_UNSPECIFIED + - :SCALAR + - :TENSOR + - :BLOB_SEQUENCE + diff --git a/mmv1/products/vertexai/inspec.yaml b/mmv1/products/vertexai/inspec.yaml new file mode 100644 index 000000000..d756fc8bb --- /dev/null +++ b/mmv1/products/vertexai/inspec.yaml @@ -0,0 +1,18 @@ +# Copyright 2017 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Provider::Inspec::Config +overrides: !ruby/object:Overrides::ResourceOverrides + DatasetsAnnotationSpec: !ruby/object:Overrides::Inspec::ResourceOverride + singular_only: true + diff --git a/mmv1/provider/file_template.rb b/mmv1/provider/file_template.rb index 8e837006a..277ed5f70 100644 --- a/mmv1/provider/file_template.rb +++ b/mmv1/provider/file_template.rb @@ -62,6 +62,10 @@ def generate(pwd, template, path, provider) Google::LOGGER.debug "Generating #{path}" path = pwd + '/' +path + + # create the directory if it doesn't exist + FileUtils.mkdir_p(File.dirname(path)) unless File.exist?(File.dirname(path)) + File.open(path, 'w') { |f| f.puts compile_file(ctx, pwd + '/' + template) } # Files are often generated in parallel. diff --git a/mmv1/provider/inspec/Gemfile.lock b/mmv1/provider/inspec/Gemfile.lock new file mode 100644 index 000000000..8b355e52b --- /dev/null +++ b/mmv1/provider/inspec/Gemfile.lock @@ -0,0 +1,974 @@ +GEM + remote: https://rubygems.org/ + specs: + activesupport (7.1.2) + base64 + bigdecimal + concurrent-ruby (~> 1.0, >= 1.0.2) + connection_pool (>= 2.2.5) + drb + i18n (>= 1.6, < 2) + minitest (>= 5.1) + mutex_m + tzinfo (~> 2.0) + addressable (2.8.7) + public_suffix (>= 2.0.2, < 7.0) + ast (2.4.2) + async (2.6.5) + console (~> 1.10) + fiber-annotation + io-event (~> 1.1) + timers (~> 4.1) + async-http (0.61.0) + async (>= 1.25) + async-io (>= 1.28) + async-pool (>= 0.2) + protocol-http (~> 0.25.0) + protocol-http1 (~> 0.16.0) + protocol-http2 (~> 0.15.0) + traces (>= 0.10.0) + async-http-faraday (0.12.0) + async-http (~> 0.42) + faraday + async-io (1.38.0) + async + async-pool (0.4.0) + async (>= 1.25) + aws-eventstream (1.3.0) + aws-partitions (1.864.0) + aws-sdk-account (1.20.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-alexaforbusiness (1.67.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-amplify (1.32.0) + aws-sdk-core (~> 3, >= 3.120.0) + aws-sigv4 (~> 1.1) + 
aws-sdk-apigateway (1.90.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-apigatewayv2 (1.53.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-applicationautoscaling (1.51.0) + aws-sdk-core (~> 3, >= 3.112.0) + aws-sigv4 (~> 1.1) + aws-sdk-athena (1.79.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-autoscaling (1.92.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-batch (1.73.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-budgets (1.62.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudformation (1.97.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudfront (1.86.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudhsm (1.50.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudhsmv2 (1.53.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudtrail (1.74.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudwatch (1.83.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudwatchevents (1.62.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-cloudwatchlogs (1.75.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-codecommit (1.62.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-codedeploy (1.63.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-codepipeline (1.67.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-cognitoidentity (1.45.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-cognitoidentityprovider (1.76.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-configservice (1.103.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-core (3.190.0) + aws-eventstream (~> 1, >= 1.3.0) + aws-partitions (~> 1, >= 1.651.0) + aws-sigv4 (~> 1.8) + jmespath (~> 1, >= 1.6.1) + 
aws-sdk-costandusagereportservice (1.53.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-databasemigrationservice (1.80.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-dynamodb (1.98.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-ec2 (1.429.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-ecr (1.68.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-ecrpublic (1.25.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-ecs (1.135.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-efs (1.71.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-eks (1.95.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-elasticache (1.95.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-elasticbeanstalk (1.63.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-elasticloadbalancing (1.51.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-elasticloadbalancingv2 (1.96.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-elasticsearchservice (1.79.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-emr (1.53.0) + aws-sdk-core (~> 3, >= 3.121.2) + aws-sigv4 (~> 1.1) + aws-sdk-eventbridge (1.46.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-firehose (1.60.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-glue (1.145.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-guardduty (1.85.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-iam (1.92.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-kafka (1.67.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-kinesis (1.54.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-kms (1.74.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-lambda (1.113.0) + aws-sdk-core (~> 3, 
>= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-mq (1.40.0) + aws-sdk-core (~> 3, >= 3.120.0) + aws-sigv4 (~> 1.1) + aws-sdk-networkfirewall (1.39.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-networkmanager (1.40.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-organizations (1.77.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-ram (1.26.0) + aws-sdk-core (~> 3, >= 3.112.0) + aws-sigv4 (~> 1.1) + aws-sdk-rds (1.208.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-redshift (1.107.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-route53 (1.83.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-route53domains (1.54.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-route53resolver (1.51.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-s3 (1.141.0) + aws-sdk-core (~> 3, >= 3.189.0) + aws-sdk-kms (~> 1) + aws-sigv4 (~> 1.8) + aws-sdk-s3control (1.43.0) + aws-sdk-core (~> 3, >= 3.122.0) + aws-sigv4 (~> 1.1) + aws-sdk-secretsmanager (1.46.0) + aws-sdk-core (~> 3, >= 3.112.0) + aws-sigv4 (~> 1.1) + aws-sdk-securityhub (1.98.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-servicecatalog (1.60.0) + aws-sdk-core (~> 3, >= 3.112.0) + aws-sigv4 (~> 1.1) + aws-sdk-ses (1.41.0) + aws-sdk-core (~> 3, >= 3.120.0) + aws-sigv4 (~> 1.1) + aws-sdk-shield (1.60.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-signer (1.32.0) + aws-sdk-core (~> 3, >= 3.120.0) + aws-sigv4 (~> 1.1) + aws-sdk-simpledb (1.29.0) + aws-sdk-core (~> 3, >= 3.120.0) + aws-sigv2 (~> 1.0) + aws-sdk-sms (1.52.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-sns (1.70.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-sqs (1.69.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-ssm (1.162.0) + aws-sdk-core (~> 3, >= 3.188.0) + aws-sigv4 (~> 1.1) + aws-sdk-states (1.39.0) + 
aws-sdk-core (~> 3, >= 3.112.0) + aws-sigv4 (~> 1.1) + aws-sdk-synthetics (1.19.0) + aws-sdk-core (~> 3, >= 3.121.2) + aws-sigv4 (~> 1.1) + aws-sdk-transfer (1.73.0) + aws-sdk-core (~> 3, >= 3.176.0) + aws-sigv4 (~> 1.1) + aws-sdk-waf (1.43.0) + aws-sdk-core (~> 3, >= 3.122.0) + aws-sigv4 (~> 1.1) + aws-sigv2 (1.2.0) + aws-sigv4 (1.8.0) + aws-eventstream (~> 1, >= 1.0.2) + azure_graph_rbac (0.17.2) + ms_rest_azure (~> 0.12.0) + azure_mgmt_key_vault (0.17.7) + ms_rest_azure (~> 0.12.0) + azure_mgmt_resources (0.18.2) + ms_rest_azure (~> 0.12.0) + azure_mgmt_security (0.19.0) + ms_rest_azure (~> 0.12.0) + azure_mgmt_storage (0.23.0) + ms_rest_azure (~> 0.12.0) + base64 (0.2.0) + bigdecimal (3.1.4) + builder (3.2.4) + bundle (0.0.1) + bundler + chef-utils (18.3.0) + concurrent-ruby + coderay (1.1.3) + concurrent-ruby (1.2.2) + connection_pool (2.4.1) + console (1.23.3) + fiber-annotation + fiber-local + coolline (0.5.0) + unicode_utils (~> 1.4) + crack (0.4.5) + rexml + declarative (0.0.20) + diff-lcs (1.5.1) + digest-crc (0.6.5) + rake (>= 12.0.0, < 14.0.0) + docker-api (2.2.0) + excon (>= 0.47.0) + multi_json + domain_name (0.6.20231109) + drb (2.2.0) + ruby2_keywords + erubi (1.12.0) + excon (0.105.0) + faraday (0.17.6) + multipart-post (>= 1.2, < 3) + faraday-cookie_jar (0.0.7) + faraday (>= 0.8.0) + http-cookie (~> 1.0.0) + faraday-http-cache (2.5.0) + faraday (>= 0.8) + faraday_middleware (0.12.2) + faraday (>= 0.7.4, < 1.0) + ffi (1.16.3) + fiber-annotation (0.2.0) + fiber-local (1.0.0) + gapic-common (0.3.4) + google-protobuf (~> 3.12, >= 3.12.2) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + googleauth (~> 0.9) + grpc (~> 1.25) + github_changelog_generator (1.16.4) + activesupport + async (>= 1.25.0) + async-http-faraday + faraday-http-cache + multi_json + octokit (~> 4.6) + rainbow (>= 2.2.1) + rake (>= 10.0) + google-api-client (0.52.0) + addressable (~> 2.5, >= 2.5.1) + googleauth (~> 0.9) + httpclient (>= 
2.8.1, < 3.0) + mini_mime (~> 1.0) + representable (~> 3.0) + retriable (>= 2.0, < 4.0) + rexml + signet (~> 0.12) + google-apis-bigquery_v2 (0.14.0) + google-apis-core (>= 0.3, < 2.a) + google-apis-cloudresourcemanager_v1 (0.12.0) + google-apis-core (>= 0.3, < 2.a) + google-apis-core (0.3.0) + addressable (~> 2.5, >= 2.5.1) + googleauth (~> 0.14) + httpclient (>= 2.8.1, < 3.0) + mini_mime (~> 1.0) + representable (~> 3.0) + retriable (>= 2.0, < 4.0) + rexml + signet (~> 0.14) + webrick + google-apis-dns_v1 (0.11.0) + google-apis-core (>= 0.3, < 2.a) + google-apis-iamcredentials_v1 (0.5.0) + google-apis-core (>= 0.3, < 2.a) + google-apis-storage_v1 (0.5.0) + google-apis-core (>= 0.3, < 2.a) + google-cloud (0.64.0) + google-cloud-asset (~> 0.1) + google-cloud-bigquery (~> 1.1) + google-cloud-bigquery-data_transfer (~> 0.1) + google-cloud-bigtable (~> 1.0) + google-cloud-container (~> 0.1) + google-cloud-dataproc (~> 0.1) + google-cloud-datastore (~> 1.4) + google-cloud-dialogflow (~> 0.1) + google-cloud-dlp (~> 0.4) + google-cloud-dns (~> 0.28) + google-cloud-error_reporting (~> 0.30) + google-cloud-firestore (~> 1.0) + google-cloud-kms (~> 1.0) + google-cloud-language (~> 0.30) + google-cloud-logging (~> 1.5) + google-cloud-monitoring (~> 0.28) + google-cloud-os_login (~> 0.1) + google-cloud-phishing_protection (~> 0.1) + google-cloud-pubsub (~> 1.0) + google-cloud-recaptcha_enterprise (~> 0.1) + google-cloud-redis (~> 0.2) + google-cloud-resource_manager (~> 0.29) + google-cloud-scheduler (~> 1.0) + google-cloud-security_center (~> 0.1) + google-cloud-spanner (~> 1.3) + google-cloud-speech (~> 0.29) + google-cloud-storage (~> 1.10) + google-cloud-talent (~> 0.1) + google-cloud-tasks (~> 1.0) + google-cloud-text_to_speech (~> 0.1) + google-cloud-trace (~> 0.31) + google-cloud-translate (~> 2.0) + google-cloud-video_intelligence (~> 2.0) + google-cloud-vision (~> 0.28) + google-cloud-asset (0.8.1) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + 
googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-bigquery (1.31.0) + concurrent-ruby (~> 1.0) + google-apis-bigquery_v2 (~> 0.1) + google-cloud-core (~> 1.2) + googleauth (~> 0.9) + mini_mime (~> 1.0) + google-cloud-bigquery-data_transfer (0.9.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-bigtable (1.3.0) + google-cloud-core (~> 1.1) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-container (0.11.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-core (1.6.1) + google-cloud-env (>= 1.0, < 3.a) + google-cloud-errors (~> 1.0) + google-cloud-dataproc (0.10.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-datastore (1.8.2) + google-cloud-core (~> 1.2) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-dialogflow (0.15.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-dlp (0.15.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-dns (0.35.0) + google-apis-dns_v1 (~> 0.1) + google-cloud-core (~> 1.2) + googleauth (~> 0.9) + zonefile (~> 1.04) + google-cloud-env (1.6.0) + faraday (>= 0.17.3, < 3.0) + google-cloud-error_reporting (0.42.3) + concurrent-ruby (~> 1.1) + google-cloud-core (~> 1.5) + google-cloud-error_reporting-v1beta1 (~> 0.0) + stackdriver-core (~> 1.3) + google-cloud-error_reporting-v1beta1 (0.4.0) + gapic-common (~> 0.3) + google-cloud-errors (~> 1.0) + google-cloud-errors (1.3.1) + 
google-cloud-firestore (1.4.4) + concurrent-ruby (~> 1.0) + google-cloud-core (~> 1.2) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + rbtree (~> 0.4.2) + google-cloud-kms (1.6.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-language (0.36.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-logging (1.10.9) + concurrent-ruby (~> 1.1) + google-cloud-core (~> 1.2) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + stackdriver-core (~> 1.3) + google-cloud-monitoring (0.38.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-os_login (0.7.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-phishing_protection (0.12.0) + google-cloud-core (~> 1.6) + google-cloud-phishing_protection-v1beta1 (>= 0.0, < 2.a) + google-cloud-phishing_protection-v1beta1 (0.3.0) + gapic-common (~> 0.3) + google-cloud-errors (~> 1.0) + google-cloud-pubsub (1.10.0) + concurrent-ruby (~> 1.1) + google-cloud-core (~> 1.2) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-recaptcha_enterprise (0.6.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-redis (0.8.2) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-resource_manager (0.36.0) + google-apis-cloudresourcemanager_v1 (~> 0.1) + 
google-cloud-core (~> 1.2) + googleauth (~> 0.9) + google-cloud-scheduler (1.3.1) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-security_center (0.10.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-spanner (1.16.2) + concurrent-ruby (~> 1.0) + google-cloud-core (~> 1.2) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-speech (0.41.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-storage (1.31.1) + addressable (~> 2.5) + digest-crc (~> 0.4) + google-apis-iamcredentials_v1 (~> 0.1) + google-apis-storage_v1 (~> 0.1) + google-cloud-core (~> 1.2) + googleauth (~> 0.9) + mini_mime (~> 1.0) + google-cloud-talent (0.20.0) + google-cloud-core (~> 1.5) + google-cloud-talent-v4beta1 (~> 0.0) + google-cloud-talent-v4beta1 (0.4.0) + gapic-common (~> 0.3) + google-cloud-errors (~> 1.0) + google-cloud-tasks (1.5.1) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + grpc-google-iam-v1 (~> 0.6.9) + google-cloud-text_to_speech (0.7.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-trace (0.42.2) + concurrent-ruby (~> 1.1) + google-cloud-core (~> 1.5) + google-cloud-trace-v1 (~> 0.0) + google-cloud-trace-v2 (~> 0.0) + stackdriver-core (~> 1.3) + google-cloud-trace-v1 (0.3.0) + gapic-common (~> 0.3) + google-cloud-errors (~> 1.0) + google-cloud-trace-v2 (0.3.0) + gapic-common (~> 0.3) + google-cloud-errors (~> 1.0) + google-cloud-translate (2.3.0) + faraday (>= 0.17.3, < 2.0) + google-cloud-core (~> 1.2) + google-gax (~> 1.8) + 
googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-video_intelligence (2.1.1) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-cloud-vision (0.38.0) + google-gax (~> 1.8) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + google-gax (1.8.2) + google-protobuf (~> 3.9) + googleapis-common-protos (>= 1.3.9, < 2.0) + googleapis-common-protos-types (>= 1.0.4, < 2.0) + googleauth (~> 0.9) + grpc (~> 1.24) + rly (~> 0.2.3) + google-protobuf (3.25.1) + google-protobuf (3.25.1-x86_64-darwin) + googleapis-common-protos (1.4.0) + google-protobuf (~> 3.14) + googleapis-common-protos-types (~> 1.2) + grpc (~> 1.27) + googleapis-common-protos-types (1.11.0) + google-protobuf (~> 3.18) + googleauth (0.14.0) + faraday (>= 0.17.3, < 2.0) + jwt (>= 1.4, < 3.0) + memoist (~> 0.16) + multi_json (~> 1.11) + os (>= 0.9, < 2.0) + signet (~> 0.14) + grpc (1.60.0) + google-protobuf (~> 3.25) + googleapis-common-protos-types (~> 1.0) + grpc (1.60.0-x86_64-darwin) + google-protobuf (~> 3.25) + googleapis-common-protos-types (~> 1.0) + grpc-google-iam-v1 (0.6.11) + google-protobuf (~> 3.14) + googleapis-common-protos (>= 1.3.11, < 2.0) + grpc (~> 1.27) + gssapi (1.3.1) + ffi (>= 1.0.1) + gyoku (1.4.0) + builder (>= 2.1.2) + rexml (~> 3.0) + hashdiff (1.0.1) + hashie (3.6.0) + htmlentities (4.3.4) + http-cookie (1.0.5) + domain_name (~> 0.5) + httpclient (2.8.3) + i18n (1.14.1) + concurrent-ruby (~> 1.0) + inifile (3.0.0) + inspec (4.16.0) + addressable (~> 2.4) + faraday (>= 0.9.0) + faraday_middleware (~> 0.12.2) + hashie (~> 3.4) + htmlentities + json-schema (~> 2.8) + license-acceptance (>= 0.2.13, < 2.0) + method_source (~> 0.8) + mixlib-log + multipart-post + parallel (~> 1.9) + parslet (~> 1.5) + pry (~> 0) + rspec (~> 3) + rspec-its (~> 1.2) + rubyzip (~> 1.2, >= 1.2.2) + semverse + sslshake (~> 1.2) + 
term-ansicolor + thor (~> 0.20) + tomlrb (~> 1.2) + train (~> 3.0) + train-aws (~> 0.1) + train-habitat (~> 0.1) + train-winrm (~> 0.2) + tty-prompt (~> 0.17) + tty-table (~> 0.10) + inspec-bin (4.16.0) + inspec (= 4.16.0) + io-event (1.3.3) + jmespath (1.6.2) + json (2.7.1) + json-schema (2.8.1) + addressable (>= 2.4) + jwt (2.7.1) + language_server-protocol (3.17.0.3) + license-acceptance (1.0.19) + pastel (~> 0.7) + tomlrb (~> 1.2) + tty-box (~> 0.3) + tty-prompt (~> 0.18) + little-plugger (1.1.4) + logging (2.3.1) + little-plugger (~> 1.1) + multi_json (~> 1.14) + memoist (0.16.2) + method_source (0.9.2) + mini_mime (1.1.5) + minitest (5.20.0) + mixlib-log (3.0.9) + mixlib-shellout (3.2.7) + chef-utils + ms_rest (0.7.6) + concurrent-ruby (~> 1.0) + faraday (>= 0.9, < 2.0.0) + timeliness (~> 0.3.10) + ms_rest_azure (0.12.0) + concurrent-ruby (~> 1.0) + faraday (>= 0.9, < 2.0.0) + faraday-cookie_jar (~> 0.0.6) + ms_rest (~> 0.7.6) + multi_json (1.15.0) + multipart-post (2.3.0) + mutex_m (0.2.0) + net-scp (4.0.0) + net-ssh (>= 2.6.5, < 8.0.0) + net-ssh (7.2.1) + nori (2.6.0) + octokit (4.22.0) + faraday (>= 0.9) + sawyer (~> 0.8.0, >= 0.5.3) + os (1.1.4) + parallel (1.23.0) + parser (3.2.2.4) + ast (~> 2.4.1) + racc + parslet (1.8.2) + pastel (0.8.0) + tty-color (~> 0.5) + protocol-hpack (1.4.2) + protocol-http (0.25.0) + protocol-http1 (0.16.0) + protocol-http (~> 0.22) + protocol-http2 (0.15.1) + protocol-hpack (~> 1.4) + protocol-http (~> 0.18) + pry (0.12.2) + coderay (~> 1.1.0) + method_source (~> 0.9.0) + pry-coolline (0.2.5) + coolline (~> 0.5) + public_suffix (6.0.1) + racc (1.7.3) + rainbow (3.1.1) + rake (13.2.1) + rbtree (0.4.6) + regexp_parser (2.8.3) + representable (3.2.0) + declarative (< 0.1.0) + trailblazer-option (>= 0.1.1, < 0.2.0) + uber (< 0.2.0) + retriable (3.1.2) + rexml (3.3.9) + rly (0.2.3) + rspec (3.12.0) + rspec-core (~> 3.12.0) + rspec-expectations (~> 3.12.0) + rspec-mocks (~> 3.12.0) + rspec-core (3.12.3) + rspec-support (~> 3.12.0) 
+ rspec-expectations (3.12.4) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.12.0) + rspec-its (1.3.0) + rspec-core (>= 3.0.0) + rspec-expectations (>= 3.0.0) + rspec-mocks (3.12.7) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.12.0) + rspec-support (3.12.2) + rubocop (1.58.0) + json (~> 2.3) + language_server-protocol (>= 3.17.0) + parallel (~> 1.10) + parser (>= 3.2.2.4) + rainbow (>= 2.2.2, < 4.0) + regexp_parser (>= 1.8, < 3.0) + rexml (>= 3.2.5, < 4.0) + rubocop-ast (>= 1.30.0, < 2.0) + ruby-progressbar (~> 1.7) + unicode-display_width (>= 2.4.0, < 3.0) + rubocop-ast (1.30.0) + parser (>= 3.2.1.0) + ruby-progressbar (1.13.0) + ruby2_keywords (0.0.5) + rubyntlm (0.6.3) + rubyzip (1.3.0) + sawyer (0.8.2) + addressable (>= 2.3.5) + faraday (> 0.8, < 2.0) + semverse (3.0.2) + signet (0.18.0) + addressable (~> 2.8) + faraday (>= 0.17.5, < 3.a) + jwt (>= 1.5, < 3.0) + multi_json (~> 1.10) + sslshake (1.3.1) + stackdriver-core (1.5.0) + google-cloud-core (~> 1.2) + strings (0.2.1) + strings-ansi (~> 0.2) + unicode-display_width (>= 1.5, < 3.0) + unicode_utils (~> 1.4) + strings-ansi (0.2.0) + sync (0.5.0) + term-ansicolor (1.7.1) + tins (~> 1.0) + thor (0.20.3) + timeliness (0.3.10) + timers (4.3.5) + tins (1.32.1) + sync + tomlrb (1.3.0) + traces (0.11.1) + trailblazer-option (0.1.2) + train (3.11.0) + activesupport (>= 6.0.3.1) + azure_graph_rbac (~> 0.16) + azure_mgmt_key_vault (~> 0.17) + azure_mgmt_resources (~> 0.15) + azure_mgmt_security (~> 0.18) + azure_mgmt_storage (~> 0.18) + docker-api (>= 1.26, < 3.0) + google-api-client (>= 0.23.9, <= 0.52.0) + googleauth (>= 0.6.6, <= 0.14.0) + inifile (~> 3.0) + train-core (= 3.11.0) + train-winrm (~> 0.2) + train-aws (0.2.36) + aws-sdk-account (~> 1.14) + aws-sdk-alexaforbusiness (~> 1.0) + aws-sdk-amplify (~> 1.32.0) + aws-sdk-apigateway (~> 1.0) + aws-sdk-apigatewayv2 (~> 1.0) + aws-sdk-applicationautoscaling (>= 1.46, < 1.52) + aws-sdk-athena (~> 1.0) + aws-sdk-autoscaling (>= 1.22, < 1.93) + 
aws-sdk-batch (>= 1.36, < 1.74) + aws-sdk-budgets (~> 1.0) + aws-sdk-cloudformation (~> 1.0) + aws-sdk-cloudfront (~> 1.0) + aws-sdk-cloudhsm (~> 1.0) + aws-sdk-cloudhsmv2 (~> 1.0) + aws-sdk-cloudtrail (~> 1.8) + aws-sdk-cloudwatch (~> 1.13) + aws-sdk-cloudwatchevents (>= 1.36, < 1.63) + aws-sdk-cloudwatchlogs (~> 1.13) + aws-sdk-codecommit (~> 1.0) + aws-sdk-codedeploy (~> 1.0) + aws-sdk-codepipeline (~> 1.0) + aws-sdk-cognitoidentity (>= 1.26, < 1.46) + aws-sdk-cognitoidentityprovider (>= 1.46, < 1.77) + aws-sdk-configservice (~> 1.21) + aws-sdk-core (~> 3.0) + aws-sdk-costandusagereportservice (~> 1.6) + aws-sdk-databasemigrationservice (>= 1.42, < 1.81) + aws-sdk-dynamodb (~> 1.31) + aws-sdk-ec2 (~> 1.70) + aws-sdk-ecr (~> 1.18) + aws-sdk-ecrpublic (~> 1.3) + aws-sdk-ecs (~> 1.30) + aws-sdk-efs (~> 1.0) + aws-sdk-eks (~> 1.9) + aws-sdk-elasticache (~> 1.0) + aws-sdk-elasticbeanstalk (~> 1.0) + aws-sdk-elasticloadbalancing (~> 1.8) + aws-sdk-elasticloadbalancingv2 (~> 1.0) + aws-sdk-elasticsearchservice (~> 1.0) + aws-sdk-emr (~> 1.53.0) + aws-sdk-eventbridge (>= 1.24, < 1.47) + aws-sdk-firehose (~> 1.0) + aws-sdk-glue (>= 1.71, < 1.146) + aws-sdk-guardduty (~> 1.31) + aws-sdk-iam (~> 1.13) + aws-sdk-kafka (~> 1.0) + aws-sdk-kinesis (~> 1.0) + aws-sdk-kms (~> 1.13) + aws-sdk-lambda (~> 1.0) + aws-sdk-mq (~> 1.40.0) + aws-sdk-networkfirewall (>= 1.6.0) + aws-sdk-networkmanager (>= 1.13.0) + aws-sdk-organizations (>= 1.17, < 1.78) + aws-sdk-ram (>= 1.21, < 1.27) + aws-sdk-rds (~> 1.43) + aws-sdk-redshift (~> 1.0) + aws-sdk-route53 (~> 1.0) + aws-sdk-route53domains (~> 1.0) + aws-sdk-route53resolver (~> 1.0) + aws-sdk-s3 (~> 1.30) + aws-sdk-s3control (~> 1.43.0) + aws-sdk-secretsmanager (>= 1.42, < 1.47) + aws-sdk-securityhub (~> 1.0) + aws-sdk-servicecatalog (>= 1.48, < 1.61) + aws-sdk-ses (~> 1.41.0) + aws-sdk-shield (~> 1.30) + aws-sdk-signer (~> 1.32.0) + aws-sdk-simpledb (~> 1.29.0) + aws-sdk-sms (~> 1.0) + aws-sdk-sns (~> 1.9) + aws-sdk-sqs (~> 1.10) + 
aws-sdk-ssm (~> 1.0) + aws-sdk-states (>= 1.35, < 1.40) + aws-sdk-synthetics (~> 1.19.0) + aws-sdk-transfer (>= 1.26, < 1.74) + aws-sdk-waf (~> 1.43.0) + train-core (3.11.0) + addressable (~> 2.5) + ffi (!= 1.13.0) + json (>= 1.8, < 3.0) + mixlib-shellout (>= 2.0, < 4.0) + net-scp (>= 1.2, < 5.0) + net-ssh (>= 2.9, < 8.0) + train-habitat (0.2.22) + train-winrm (0.2.13) + winrm (>= 2.3.6, < 3.0) + winrm-elevated (~> 1.2.2) + winrm-fs (~> 1.0) + tty-box (0.7.0) + pastel (~> 0.8) + strings (~> 0.2.0) + tty-cursor (~> 0.7) + tty-color (0.6.0) + tty-cursor (0.7.1) + tty-prompt (0.23.1) + pastel (~> 0.8) + tty-reader (~> 0.8) + tty-reader (0.9.0) + tty-cursor (~> 0.7) + tty-screen (~> 0.8) + wisper (~> 2.0) + tty-screen (0.8.2) + tty-table (0.12.0) + pastel (~> 0.8) + strings (~> 0.2.0) + tty-screen (~> 0.8) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + uber (0.1.0) + unicode-display_width (2.5.0) + unicode_utils (1.4.0) + vcr (6.2.0) + webmock (3.19.1) + addressable (>= 2.8.0) + crack (>= 0.3.2) + hashdiff (>= 0.4.0, < 2.0.0) + webrick (1.8.2) + winrm (2.3.6) + builder (>= 2.1.2) + erubi (~> 1.8) + gssapi (~> 1.2) + gyoku (~> 1.0) + httpclient (~> 2.2, >= 2.2.0.2) + logging (>= 1.6.1, < 3.0) + nori (~> 2.0) + rubyntlm (~> 0.6.0, >= 0.6.3) + winrm-elevated (1.2.3) + erubi (~> 1.8) + winrm (~> 2.0) + winrm-fs (~> 1.0) + winrm-fs (1.3.3) + erubi (~> 1.8) + logging (>= 1.6.1, < 3.0) + rubyzip (~> 1.1) + winrm (~> 2.0) + wisper (2.0.1) + zonefile (1.06) + +PLATFORMS + x86_64-darwin + x86_64-linux + +DEPENDENCIES + bundle + faraday (>= 0.16.2) + github_changelog_generator + google-api-client + google-cloud + googleauth + inifile + inspec-bin (= 4.16.0) + pry-coolline + rake + rubocop (>= 0.77.0) + vcr + webmock + +BUNDLED WITH + 2.5.17 diff --git a/mmv1/templates/inspec/doc_template.md.erb b/mmv1/templates/inspec/doc_template.md.erb index 937912f36..03160ef88 100644 --- a/mmv1/templates/inspec/doc_template.md.erb +++ b/mmv1/templates/inspec/doc_template.md.erb @@ -14,16 
+14,32 @@ -%> <% autogen_exception -%> <% resource_underscored_name = plural ? resource_name(object, product).pluralize : resource_name(object, product) -%> ---- -title: About the <%= resource_underscored_name -%> resource -platform: gcp ---- ++++ + +title = "<%= resource_underscored_name -%> Resource" +platform = "gcp" +draft = false +gh_repo = "inspec-gcp" + + +[menu.inspec] + +title = "<%= resource_underscored_name -%>" +identifier = "inspec/resources/gcp/<%= resource_underscored_name -%> Resource" +parent = "inspec/resources/gcp" ++++ + +Use the `<%= resource_underscored_name -%>` InSpec audit resource to test the properties of a Google <%= object.name -%>. + +## Installation +{{% inspec_gcp_install %}} ## Syntax A `<%= resource_underscored_name -%>` is used to test a Google <%= object.name -%> resource <% if beta?(object) -%> + ## Beta Resource This resource has beta fields available. To retrieve these fields, include `beta: true` in the constructor for the resource @@ -44,13 +60,23 @@ This resource has beta fields available. 
To retrieve these fields, include `beta <% end -%> +## Parameters +Properties that can be accessed from the `<%= resource_underscored_name -%>` resource: + +<% if plural -%> +See <%= "[#{resource_name(object, product)}.md](#{resource_name(object, product)}.md)" -%> for more detailed information +<% object.all_user_properties.reject(&:exclude_plural).each do |prop| -%> +* `<%= "#{(prop.override_name || prop.out_name).pluralize}" -%>`: <% if prop.min_version.name == 'beta' -%><%= '(Beta only) ' -%><% end -%>an array of `<%= resource_name(object, product) -%>` <%= "#{prop.out_name}" -%> + +<% end -%> +<% end -%> ## Properties Properties that can be accessed from the `<%= resource_underscored_name -%>` resource: <% if plural -%> See <%= "[#{resource_name(object, product)}.md](#{resource_name(object, product)}.md)" -%> for more detailed information <% object.all_user_properties.reject(&:exclude_plural).each do |prop| -%> - * `<%= "#{(prop.override_name || prop.out_name).pluralize}" -%>`: <% if prop.min_version.name == 'beta' -%><%= '(Beta only) ' -%><% end -%>an array of `<%= resource_name(object, product) -%>` <%= "#{prop.out_name}" -%> +* `<%= "#{(prop.override_name || prop.out_name).pluralize}" -%>`: <% if prop.min_version.name == 'beta' -%><%= '(Beta only) ' -%><% end -%>an array of `<%= resource_name(object, product) -%>` <%= "#{prop.out_name}" -%> <% end -%> diff --git a/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organization.erb b/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organization.erb new file mode 100644 index 000000000..be008cc81 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organization.erb @@ -0,0 +1,32 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% organization = grab_attributes(pwd)['organization'] -%> +describe google_apigee_organization(name: <%= doc_generation ? 
"' #{organization['name']}'":"organization['name']" -%>) do + it { should exist } + its('api_consumer_data_encryption_key_name') { should cmp <%= doc_generation ? "'#{organization['api_consumer_data_encryption_key_name']}'" : "organization['api_consumer_data_encryption_key_name']" -%> } + its('runtime_database_encryption_key_name') { should cmp <%= doc_generation ? "'#{organization['runtime_database_encryption_key_name']}'" : "organization['runtime_database_encryption_key_name']" -%> } + its('runtime_type') { should cmp <%= doc_generation ? "'#{organization['runtime_type']}'" : "organization['runtime_type']" -%> } + its('type') { should cmp <%= doc_generation ? "'#{organization['type']}'" : "organization['type']" -%> } + its('authorized_network') { should cmp <%= doc_generation ? "'#{organization['authorized_network']}'" : "organization['authorized_network']" -%> } + its('project_id') { should cmp <%= doc_generation ? "'#{organization['project_id']}'" : "organization['project_id']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{organization['description']}'" : "organization['description']" -%> } + its('ca_certificate') { should cmp <%= doc_generation ? "'#{organization['ca_certificate']}'" : "organization['ca_certificate']" -%> } + its('subscription_type') { should cmp <%= doc_generation ? "'#{organization['subscription_type']}'" : "organization['subscription_type']" -%> } + its('customer_name') { should cmp <%= doc_generation ? "'#{organization['customer_name']}'" : "organization['customer_name']" -%> } + its('created_at') { should cmp <%= doc_generation ? "'#{organization['created_at']}'" : "organization['created_at']" -%> } + its('last_modified_at') { should cmp <%= doc_generation ? "'#{organization['last_modified_at']}'" : "organization['last_modified_at']" -%> } + its('subscription_plan') { should cmp <%= doc_generation ? 
"'#{organization['subscription_plan']}'" : "organization['subscription_plan']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{organization['state']}'" : "organization['state']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{organization['name']}'" : "organization['name']" -%> } + its('control_plane_encryption_key_name') { should cmp <%= doc_generation ? "'#{organization['control_plane_encryption_key_name']}'" : "organization['control_plane_encryption_key_name']" -%> } + its('analytics_region') { should cmp <%= doc_generation ? "'#{organization['analytics_region']}'" : "organization['analytics_region']" -%> } + its('api_consumer_data_location') { should cmp <%= doc_generation ? "'#{organization['api_consumer_data_location']}'" : "organization['api_consumer_data_location']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{organization['display_name']}'" : "organization['display_name']" -%> } + its('apigee_project_id') { should cmp <%= doc_generation ? "'#{organization['apigee_project_id']}'" : "organization['apigee_project_id']" -%> } + its('expires_at') { should cmp <%= doc_generation ? "'#{organization['expires_at']}'" : "organization['expires_at']" -%> } + its('billing_type') { should cmp <%= doc_generation ? 
"'#{organization['billing_type']}'" : "organization['billing_type']" -%> } + +end + +describe google_apigee_organization(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organization_attributes.erb b/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organization_attributes.erb new file mode 100644 index 000000000..c727f9dd1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organization_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + organization = input('organization', value: <%= JSON.pretty_generate(grab_attributes(pwd)['organization']) -%>, description: 'organization description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organizations.erb b/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organizations.erb new file mode 100644 index 000000000..409733b51 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization/google_apigee_organizations.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% organization = grab_attributes(pwd)['organization'] -%> + describe google_apigee_organizations(parent: <%= doc_generation ? 
"' #{organization['parent']}'":"organization['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_api.erb b/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_api.erb new file mode 100644 index 000000000..95031f6ef --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_api.erb @@ -0,0 +1,13 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% organization_api = grab_attributes(pwd)['organization_api'] -%> +describe google_apigee_organization_api(name: <%= doc_generation ? "' #{organization_api['name']}'":"organization_api['name']" -%>) do + it { should exist } + its('latest_revision_id') { should cmp <%= doc_generation ? "'#{organization_api['latest_revision_id']}'" : "organization_api['latest_revision_id']" -%> } + its('api_proxy_type') { should cmp <%= doc_generation ? "'#{organization_api['api_proxy_type']}'" : "organization_api['api_proxy_type']" -%> } + its('name') { should cmp <%= doc_generation ? 
"'#{organization_api['name']}'" : "organization_api['name']" -%> } + +end + +describe google_apigee_organization_api(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_api_attributes.erb b/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_api_attributes.erb new file mode 100644 index 000000000..7ae5bff82 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_api_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + organization_api = input('organization_api', value: <%= JSON.pretty_generate(grab_attributes(pwd)['organization_api']) -%>, description: 'organization_api description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_apis.erb b/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_apis.erb new file mode 100644 index 000000000..a8ff576f0 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_api/google_apigee_organization_apis.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% organization_api = grab_attributes(pwd)['organization_api'] -%> + describe google_apigee_organization_apis(parent: <%= doc_generation ? 
"' #{organization_api['parent']}'":"organization_api['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revision.erb b/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revision.erb new file mode 100644 index 000000000..121b2cf1f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revision.erb @@ -0,0 +1,12 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% organization_api_revision = grab_attributes(pwd)['organization_api_revision'] -%> +describe google_apigee_organization_api_revision(name: <%= doc_generation ? "' #{organization_api_revision['name']}'":"organization_api_revision['name']" -%>) do + it { should exist } + its('content_type') { should cmp <%= doc_generation ? "'#{organization_api_revision['content_type']}'" : "organization_api_revision['content_type']" -%> } + its('data') { should cmp <%= doc_generation ? 
"'#{organization_api_revision['data']}'" : "organization_api_revision['data']" -%> } + +end + +describe google_apigee_organization_api_revision(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revision_attributes.erb b/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revision_attributes.erb new file mode 100644 index 000000000..f2fc3d7d2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revision_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + organization_api_revision = input('organization_api_revision', value: <%= JSON.pretty_generate(grab_attributes(pwd)['organization_api_revision']) -%>, description: 'organization_api_revision description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revisions.erb b/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revisions.erb new file mode 100644 index 000000000..ca64e3a91 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_api_revision/google_apigee_organization_api_revisions.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% organization_api_revision = grab_attributes(pwd)['organization_api_revision'] -%> + describe google_apigee_organization_api_revisions() do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroup.erb 
b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroup.erb new file mode 100644 index 000000000..04f6f2526 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroup.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% organization_envgroup = grab_attributes(pwd)['organization_envgroup'] -%> +describe google_apigee_organization_envgroup(name: <%= doc_generation ? "' #{organization_envgroup['name']}'":"organization_envgroup['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{organization_envgroup['name']}'" : "organization_envgroup['name']" -%> } + its('last_modified_at') { should cmp <%= doc_generation ? "'#{organization_envgroup['last_modified_at']}'" : "organization_envgroup['last_modified_at']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{organization_envgroup['state']}'" : "organization_envgroup['state']" -%> } + its('created_at') { should cmp <%= doc_generation ? 
"'#{organization_envgroup['created_at']}'" : "organization_envgroup['created_at']" -%> } + +end + +describe google_apigee_organization_envgroup(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroup_attributes.erb b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroup_attributes.erb new file mode 100644 index 000000000..0663d1f90 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroup_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + organization_envgroup = input('organization_envgroup', value: <%= JSON.pretty_generate(grab_attributes(pwd)['organization_envgroup']) -%>, description: 'organization_envgroup description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroups.erb b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroups.erb new file mode 100644 index 000000000..5f87fde16 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup/google_apigee_organization_envgroups.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% organization_envgroup = grab_attributes(pwd)['organization_envgroup'] -%> + describe google_apigee_organization_envgroups(parent: <%= doc_generation ? 
"' #{organization_envgroup['parent']}'":"organization_envgroup['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachment.erb b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachment.erb new file mode 100644 index 000000000..cc52129ba --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachment.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% organization_envgroup_attachment = grab_attributes(pwd)['organization_envgroup_attachment'] -%> +describe google_apigee_organization_envgroup_attachment(name: <%= doc_generation ? "' #{organization_envgroup_attachment['name']}'":"organization_envgroup_attachment['name']" -%>) do + it { should exist } + its('created_at') { should cmp <%= doc_generation ? "'#{organization_envgroup_attachment['created_at']}'" : "organization_envgroup_attachment['created_at']" -%> } + its('environment') { should cmp <%= doc_generation ? "'#{organization_envgroup_attachment['environment']}'" : "organization_envgroup_attachment['environment']" -%> } + its('environment_group_id') { should cmp <%= doc_generation ? "'#{organization_envgroup_attachment['environment_group_id']}'" : "organization_envgroup_attachment['environment_group_id']" -%> } + its('name') { should cmp <%= doc_generation ? 
"'#{organization_envgroup_attachment['name']}'" : "organization_envgroup_attachment['name']" -%> } + +end + +describe google_apigee_organization_envgroup_attachment(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachment_attributes.erb b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachment_attributes.erb new file mode 100644 index 000000000..e93cd6671 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachment_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + organization_envgroup_attachment = input('organization_envgroup_attachment', value: <%= JSON.pretty_generate(grab_attributes(pwd)['organization_envgroup_attachment']) -%>, description: 'organization_envgroup_attachment description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachments.erb b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachments.erb new file mode 100644 index 000000000..a14609955 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_envgroup_attachment/google_apigee_organization_envgroup_attachments.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% organization_envgroup_attachment = grab_attributes(pwd)['organization_envgroup_attachment'] -%> + describe google_apigee_organization_envgroup_attachments(parent: <%= doc_generation ? 
"' #{organization_envgroup_attachment['parent']}'":"organization_envgroup_attachment['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachment.erb b/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachment.erb new file mode 100644 index 000000000..1083ef5c6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachment.erb @@ -0,0 +1,13 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% organization_instance_attachment = grab_attributes(pwd)['organization_instance_attachment'] -%> +describe google_apigee_organization_instance_attachment(name: <%= doc_generation ? "' #{organization_instance_attachment['name']}'":"organization_instance_attachment['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{organization_instance_attachment['name']}'" : "organization_instance_attachment['name']" -%> } + its('environment') { should cmp <%= doc_generation ? "'#{organization_instance_attachment['environment']}'" : "organization_instance_attachment['environment']" -%> } + its('created_at') { should cmp <%= doc_generation ? 
"'#{organization_instance_attachment['created_at']}'" : "organization_instance_attachment['created_at']" -%> } + +end + +describe google_apigee_organization_instance_attachment(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachment_attributes.erb b/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachment_attributes.erb new file mode 100644 index 000000000..f37b007b4 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachment_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + organization_instance_attachment = input('organization_instance_attachment', value: <%= JSON.pretty_generate(grab_attributes(pwd)['organization_instance_attachment']) -%>, description: 'organization_instance_attachment description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachments.erb b/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachments.erb new file mode 100644 index 000000000..b7f158331 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_apigee_organization_instance_attachment/google_apigee_organization_instance_attachments.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% organization_instance_attachment = grab_attributes(pwd)['organization_instance_attachment'] -%> + describe google_apigee_organization_instance_attachments(parent: <%= doc_generation ? 
"' #{organization_instance_attachment['parent']}'":"organization_instance_attachment['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repositories.erb b/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repositories.erb new file mode 100644 index 000000000..966c3092f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repositories.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_repository = grab_attributes(pwd)['project_location_repository'] -%> + describe google_artifactregistry_project_location_repositories(parent: <%= doc_generation ? "' #{project_location_repository['parent']}'":"project_location_repository['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repository.erb b/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repository.erb new file mode 100644 index 000000000..b927b672f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repository.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_repository = grab_attributes(pwd)['project_location_repository'] -%> +describe google_artifactregistry_project_location_repository(name: <%= doc_generation ? 
"' #{project_location_repository['name']}'":"project_location_repository['name']" -%>) do + it { should exist } + +end + +describe google_artifactregistry_project_location_repository(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repository_attributes.erb b/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repository_attributes.erb new file mode 100644 index 000000000..f3bb65553 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_artifactregistry_project_location_repository/google_artifactregistry_project_location_repository_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_repository = input('project_location_repository', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_repository']) -%>, description: 'project_location_repository description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profile.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profile.erb new file mode 100644 index 000000000..71a1ccfcc --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profile.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_instance_app_profile = grab_attributes(pwd)['project_instance_app_profile'] -%> +describe google_bigtableadmin_project_instance_app_profile(name: <%= doc_generation ? 
"' #{project_instance_app_profile['name']}'":"project_instance_app_profile['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_instance_app_profile['name']}'" : "project_instance_app_profile['name']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{project_instance_app_profile['etag']}'" : "project_instance_app_profile['etag']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{project_instance_app_profile['description']}'" : "project_instance_app_profile['description']" -%> } + its('priority') { should cmp <%= doc_generation ? "'#{project_instance_app_profile['priority']}'" : "project_instance_app_profile['priority']" -%> } + +end + +describe google_bigtableadmin_project_instance_app_profile(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profile_attributes.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profile_attributes.erb new file mode 100644 index 000000000..00ca9fcb6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profile_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_instance_app_profile = input('project_instance_app_profile', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_instance_app_profile']) -%>, description: 'project_instance_app_profile description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profiles.erb 
b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profiles.erb new file mode 100644 index 000000000..1e50d9add --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_app_profile/google_bigtableadmin_project_instance_app_profiles.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_instance_app_profile = grab_attributes(pwd)['project_instance_app_profile'] -%> + describe google_bigtableadmin_project_instance_app_profiles(parent: <%= doc_generation ? "' #{project_instance_app_profile['parent']}'":"project_instance_app_profile['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_cluster.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_cluster.erb new file mode 100644 index 000000000..4b91950b0 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_cluster.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_instance_cluster = grab_attributes(pwd)['project_instance_cluster'] -%> +describe google_bigtableadmin_project_instance_cluster(name: <%= doc_generation ? "' #{project_instance_cluster['name']}'":"project_instance_cluster['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_instance_cluster['name']}'" : "project_instance_cluster['name']" -%> } + its('location') { should cmp <%= doc_generation ? "'#{project_instance_cluster['location']}'" : "project_instance_cluster['location']" -%> } + its('state') { should cmp <%= doc_generation ? 
"'#{project_instance_cluster['state']}'" : "project_instance_cluster['state']" -%> } + its('default_storage_type') { should cmp <%= doc_generation ? "'#{project_instance_cluster['default_storage_type']}'" : "project_instance_cluster['default_storage_type']" -%> } + +end + +describe google_bigtableadmin_project_instance_cluster(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_cluster_attributes.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_cluster_attributes.erb new file mode 100644 index 000000000..9215c480d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_cluster_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_instance_cluster = input('project_instance_cluster', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_instance_cluster']) -%>, description: 'project_instance_cluster description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_clusters.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_clusters.erb new file mode 100644 index 000000000..f69fa98f8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster/google_bigtableadmin_project_instance_clusters.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_instance_cluster = grab_attributes(pwd)['project_instance_cluster'] -%> + describe 
google_bigtableadmin_project_instance_clusters(parent: <%= doc_generation ? "' #{project_instance_cluster['parent']}'":"project_instance_cluster['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backup.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backup.erb new file mode 100644 index 000000000..7dff9a9d6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backup.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_instance_cluster_backup = grab_attributes(pwd)['project_instance_cluster_backup'] -%> +describe google_bigtableadmin_project_instance_cluster_backup(name: <%= doc_generation ? "' #{project_instance_cluster_backup['name']}'":"project_instance_cluster_backup['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_instance_cluster_backup['name']}'" : "project_instance_cluster_backup['name']" -%> } + its('source_table') { should cmp <%= doc_generation ? "'#{project_instance_cluster_backup['source_table']}'" : "project_instance_cluster_backup['source_table']" -%> } + its('source_backup') { should cmp <%= doc_generation ? "'#{project_instance_cluster_backup['source_backup']}'" : "project_instance_cluster_backup['source_backup']" -%> } + its('expire_time') { should cmp <%= doc_generation ? "'#{project_instance_cluster_backup['expire_time']}'" : "project_instance_cluster_backup['expire_time']" -%> } + its('start_time') { should cmp <%= doc_generation ? 
"'#{project_instance_cluster_backup['start_time']}'" : "project_instance_cluster_backup['start_time']" -%> } + its('end_time') { should cmp <%= doc_generation ? "'#{project_instance_cluster_backup['end_time']}'" : "project_instance_cluster_backup['end_time']" -%> } + its('size_bytes') { should cmp <%= doc_generation ? "'#{project_instance_cluster_backup['size_bytes']}'" : "project_instance_cluster_backup['size_bytes']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{project_instance_cluster_backup['state']}'" : "project_instance_cluster_backup['state']" -%> } + +end + +describe google_bigtableadmin_project_instance_cluster_backup(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backup_attributes.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backup_attributes.erb new file mode 100644 index 000000000..cd06a646c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backup_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_instance_cluster_backup = input('project_instance_cluster_backup', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_instance_cluster_backup']) -%>, description: 'project_instance_cluster_backup description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backups.erb b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backups.erb new 
file mode 100644 index 000000000..aba3493d5 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_bigtableadmin_project_instance_cluster_backup/google_bigtableadmin_project_instance_cluster_backups.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_instance_cluster_backup = grab_attributes(pwd)['project_instance_cluster_backup'] -%> + describe google_bigtableadmin_project_instance_cluster_backups(parent: <%= doc_generation ? "' #{project_instance_cluster_backup['parent']}'":"project_instance_cluster_backup['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_ring.erb b/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_ring.erb new file mode 100644 index 000000000..65290bf8f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_ring.erb @@ -0,0 +1,12 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_key_ring = grab_attributes(pwd)['project_location_key_ring'] -%> +describe google_cloudkms_project_location_key_ring(name: <%= doc_generation ? "' #{project_location_key_ring['name']}'":"project_location_key_ring['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_location_key_ring['name']}'" : "project_location_key_ring['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? 
"'#{project_location_key_ring['create_time']}'" : "project_location_key_ring['create_time']" -%> } + +end + +describe google_cloudkms_project_location_key_ring(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_ring_attributes.erb b/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_ring_attributes.erb new file mode 100644 index 000000000..b2002da0b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_ring_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_key_ring = input('project_location_key_ring', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_key_ring']) -%>, description: 'project_location_key_ring description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_rings.erb b/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_rings.erb new file mode 100644 index 000000000..683d2f1bc --- /dev/null +++ b/mmv1/templates/inspec/examples/google_cloudkms_project_location_key_ring/google_cloudkms_project_location_key_rings.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_key_ring = grab_attributes(pwd)['project_location_key_ring'] -%> + describe google_cloudkms_project_location_key_rings() do + it { should exist } + end \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environment.erb b/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environment.erb new file mode 100644 index 000000000..4a6bd1c78 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environment.erb @@ -0,0 +1,15 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_environment = grab_attributes(pwd)['project_location_environment'] -%> +describe google_composer_project_location_environment(name: <%= doc_generation ? "' #{project_location_environment['name']}'":"project_location_environment['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_location_environment['name']}'" : "project_location_environment['name']" -%> } + its('uuid') { should cmp <%= doc_generation ? "'#{project_location_environment['uuid']}'" : "project_location_environment['uuid']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{project_location_environment['state']}'" : "project_location_environment['state']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{project_location_environment['create_time']}'" : "project_location_environment['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? 
"'#{project_location_environment['update_time']}'" : "project_location_environment['update_time']" -%> } + +end + +describe google_composer_project_location_environment(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environment_attributes.erb b/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environment_attributes.erb new file mode 100644 index 000000000..ca852318f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environment_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_environment = input('project_location_environment', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_environment']) -%>, description: 'project_location_environment description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environments.erb b/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environments.erb new file mode 100644 index 000000000..4f2ce2b58 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_composer_project_location_environment/google_composer_project_location_environments.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_environment = grab_attributes(pwd)['project_location_environment'] -%> + describe google_composer_project_location_environments(parent: <%= doc_generation ? 
"' #{project_location_environment['parent']}'":"project_location_environment['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_version.erb b/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_version.erb new file mode 100644 index 000000000..e7b46101d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_version.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_image_version = grab_attributes(pwd)['project_location_image_version'] -%> +describe google_composer_project_location_image_version() do + it { should exist } + +end + +describe google_composer_project_location_image_version() do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_version_attributes.erb b/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_version_attributes.erb new file mode 100644 index 000000000..e3bf302c1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_version_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_image_version = input('project_location_image_version', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_image_version']) -%>, description: 'project_location_image_version description') \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_versions.erb b/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_versions.erb new file mode 100644 index 000000000..af9a14b45 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_composer_project_location_image_version/google_composer_project_location_image_versions.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_image_version = grab_attributes(pwd)['project_location_image_version'] -%> + describe google_composer_project_location_image_versions(parent: <%= doc_generation ? "' #{project_location_image_version['parent']}'":"project_location_image_version['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group.erb b/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group.erb index 069f7e809..8fd75ae3a 100644 --- a/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group.erb +++ b/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group.erb @@ -1,6 +1,22 @@ <% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> -<% accelerator_type = grab_attributes(pwd)['accelerator_type'] -%> -describe google_compute_accelerator_type(project: <%= gcp_project_id -%>, zone: 'us-east1-b', name: <%= doc_generation ? 
"'#{accelerator_type['name']}'" : "accelerator_type['name']" -%>) do - it { should exist } - it { should be_up } +<% global_network_endpoint_group = grab_attributes(pwd)['global_network_endpoint_group'] -%> +describe google_compute_global_network_endpoint_group(networkEndpointGroup: <%= doc_generation ? "' #{global_network_endpoint_group['networkEndpointGroup']}'":"global_network_endpoint_group['networkEndpointGroup']" -%>, project: <%= gcp_project_id -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['kind']}'" : "global_network_endpoint_group['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['id']}'" : "global_network_endpoint_group['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['creation_timestamp']}'" : "global_network_endpoint_group['creation_timestamp']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['self_link']}'" : "global_network_endpoint_group['self_link']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['name']}'" : "global_network_endpoint_group['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['description']}'" : "global_network_endpoint_group['description']" -%> } + its('network_endpoint_type') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['network_endpoint_type']}'" : "global_network_endpoint_group['network_endpoint_type']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['region']}'" : "global_network_endpoint_group['region']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['zone']}'" : "global_network_endpoint_group['zone']" -%> } + its('network') { should cmp <%= doc_generation ? 
"'#{global_network_endpoint_group['network']}'" : "global_network_endpoint_group['network']" -%> } + its('subnetwork') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['subnetwork']}'" : "global_network_endpoint_group['subnetwork']" -%> } + its('psc_target_service') { should cmp <%= doc_generation ? "'#{global_network_endpoint_group['psc_target_service']}'" : "global_network_endpoint_group['psc_target_service']" -%> } + +end + +describe google_compute_global_network_endpoint_group(networkEndpointGroup: <%= doc_generation ? "' #{global_network_endpoint_group['networkEndpointGroup']}'":"global_network_endpoint_group['networkEndpointGroup']" -%>, project: <%= gcp_project_id -%>) do + it { should_not exist } end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group_attributes.erb b/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group_attributes.erb index 1bb8d4266..346da98ea 100644 --- a/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group_attributes.erb +++ b/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_group_attributes.erb @@ -1,2 +1,3 @@ gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') -accelerator_type = input('accelerator_type', value: <%= JSON.pretty_generate(grab_attributes(pwd)['name']) -%>, description: 'The accelerator type') \ No newline at end of file + + global_network_endpoint_group = input('global_network_endpoint_group', value: <%= JSON.pretty_generate(grab_attributes(pwd)['global_network_endpoint_group']) -%>, description: 'global_network_endpoint_group description') \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_groups.erb b/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_groups.erb index c60111029..a30292610 100644 --- a/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_groups.erb +++ b/mmv1/templates/inspec/examples/google_compute_global_network_endpoint_group/google_compute_global_network_endpoint_groups.erb @@ -1,5 +1,5 @@ <% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> -describe google_compute_accelerator_types(project: <%= gcp_project_id -%>, zone: 'us-east1-b') do -it { should exist } -it { should be_up } -end \ No newline at end of file + <% global_network_endpoint_group = grab_attributes(pwd)['global_network_endpoint_group'] -%> + describe google_compute_global_network_endpoint_groups(project: <%= gcp_project_id -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_image_family_view/google_compute_image_family_view.erb b/mmv1/templates/inspec/examples/google_compute_image_family_view/google_compute_image_family_view.erb index 931ab49bb..d059478dd 100644 --- a/mmv1/templates/inspec/examples/google_compute_image_family_view/google_compute_image_family_view.erb +++ b/mmv1/templates/inspec/examples/google_compute_image_family_view/google_compute_image_family_view.erb @@ -1,9 +1,9 @@ <% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> <% image_family_views = grab_attributes(pwd)['image_family_views'] -%> + describe google_compute_image_family_view(project: <%= gcp_project_id -%>, zone: <%= doc_generation ? "'#{image_family_views['zone']}'" : "image_family_views['zone']" -%>, name: <%= doc_generation ? 
"'#{image_family_views['family']}'" : "image_family_views['family']" -%>) do it { should exist } - it { should be_up } its('image_name') { should eq <%= doc_generation ? "'#{image_family_views['name']}'" : "image_family_views['name']" -%> } its('image_source_type') { should eq <%= doc_generation ? "'#{image_family_views['source_type']}'" : "image_family_views['source_type']" -%> } its('image_family') { should eq <%= doc_generation ? "'#{image_family_views['family']}'" : "image_family_views['family']" -%> } -end \ No newline at end of file +end diff --git a/mmv1/templates/inspec/examples/google_compute_image_family_view/google_compute_image_family_views.erb b/mmv1/templates/inspec/examples/google_compute_image_family_view/google_compute_image_family_views.erb deleted file mode 100644 index 931ab49bb..000000000 --- a/mmv1/templates/inspec/examples/google_compute_image_family_view/google_compute_image_family_views.erb +++ /dev/null @@ -1,9 +0,0 @@ -<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> -<% image_family_views = grab_attributes(pwd)['image_family_views'] -%> -describe google_compute_image_family_view(project: <%= gcp_project_id -%>, zone: <%= doc_generation ? "'#{image_family_views['zone']}'" : "image_family_views['zone']" -%>, name: <%= doc_generation ? "'#{image_family_views['family']}'" : "image_family_views['family']" -%>) do - it { should exist } - it { should be_up } - its('image_name') { should eq <%= doc_generation ? "'#{image_family_views['name']}'" : "image_family_views['name']" -%> } - its('image_source_type') { should eq <%= doc_generation ? "'#{image_family_views['source_type']}'" : "image_family_views['source_type']" -%> } - its('image_family') { should eq <%= doc_generation ? 
"'#{image_family_views['family']}'" : "image_family_views['family']" -%> } -end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachment.erb b/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachment.erb new file mode 100644 index 000000000..82b976990 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachment.erb @@ -0,0 +1,38 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% interconnect_attachment = grab_attributes(pwd)['interconnect_attachment'] -%> +describe google_compute_interconnect_attachment(interconnectAttachment: <%= doc_generation ? "' #{interconnect_attachment['interconnectAttachment']}'":"interconnect_attachment['interconnectAttachment']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{interconnect_attachment['region']}'":"interconnect_attachment['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{interconnect_attachment['kind']}'" : "interconnect_attachment['kind']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{interconnect_attachment['description']}'" : "interconnect_attachment['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{interconnect_attachment['self_link']}'" : "interconnect_attachment['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{interconnect_attachment['self_link_with_id']}'" : "interconnect_attachment['self_link_with_id']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{interconnect_attachment['id']}'" : "interconnect_attachment['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? 
"'#{interconnect_attachment['creation_timestamp']}'" : "interconnect_attachment['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{interconnect_attachment['name']}'" : "interconnect_attachment['name']" -%> } + its('interconnect') { should cmp <%= doc_generation ? "'#{interconnect_attachment['interconnect']}'" : "interconnect_attachment['interconnect']" -%> } + its('router') { should cmp <%= doc_generation ? "'#{interconnect_attachment['router']}'" : "interconnect_attachment['router']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{interconnect_attachment['region']}'" : "interconnect_attachment['region']" -%> } + its('google_reference_id') { should cmp <%= doc_generation ? "'#{interconnect_attachment['google_reference_id']}'" : "interconnect_attachment['google_reference_id']" -%> } + its('operational_status') { should cmp <%= doc_generation ? "'#{interconnect_attachment['operational_status']}'" : "interconnect_attachment['operational_status']" -%> } + its('cloud_router_ip_address') { should cmp <%= doc_generation ? "'#{interconnect_attachment['cloud_router_ip_address']}'" : "interconnect_attachment['cloud_router_ip_address']" -%> } + its('customer_router_ip_address') { should cmp <%= doc_generation ? "'#{interconnect_attachment['customer_router_ip_address']}'" : "interconnect_attachment['customer_router_ip_address']" -%> } + its('type') { should cmp <%= doc_generation ? "'#{interconnect_attachment['type']}'" : "interconnect_attachment['type']" -%> } + its('pairing_key') { should cmp <%= doc_generation ? "'#{interconnect_attachment['pairing_key']}'" : "interconnect_attachment['pairing_key']" -%> } + its('edge_availability_domain') { should cmp <%= doc_generation ? "'#{interconnect_attachment['edge_availability_domain']}'" : "interconnect_attachment['edge_availability_domain']" -%> } + its('bandwidth') { should cmp <%= doc_generation ? 
"'#{interconnect_attachment['bandwidth']}'" : "interconnect_attachment['bandwidth']" -%> } + its('label_fingerprint') { should cmp <%= doc_generation ? "'#{interconnect_attachment['label_fingerprint']}'" : "interconnect_attachment['label_fingerprint']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{interconnect_attachment['state']}'" : "interconnect_attachment['state']" -%> } + its('partner_asn') { should cmp <%= doc_generation ? "'#{interconnect_attachment['partner_asn']}'" : "interconnect_attachment['partner_asn']" -%> } + its('encryption') { should cmp <%= doc_generation ? "'#{interconnect_attachment['encryption']}'" : "interconnect_attachment['encryption']" -%> } + its('stack_type') { should cmp <%= doc_generation ? "'#{interconnect_attachment['stack_type']}'" : "interconnect_attachment['stack_type']" -%> } + its('cloud_router_ipv6address') { should cmp <%= doc_generation ? "'#{interconnect_attachment['cloud_router_ipv6address']}'" : "interconnect_attachment['cloud_router_ipv6address']" -%> } + its('customer_router_ipv6address') { should cmp <%= doc_generation ? "'#{interconnect_attachment['customer_router_ipv6address']}'" : "interconnect_attachment['customer_router_ipv6address']" -%> } + its('cloud_router_ipv6interface_id') { should cmp <%= doc_generation ? "'#{interconnect_attachment['cloud_router_ipv6interface_id']}'" : "interconnect_attachment['cloud_router_ipv6interface_id']" -%> } + its('customer_router_ipv6interface_id') { should cmp <%= doc_generation ? "'#{interconnect_attachment['customer_router_ipv6interface_id']}'" : "interconnect_attachment['customer_router_ipv6interface_id']" -%> } + its('remote_service') { should cmp <%= doc_generation ? "'#{interconnect_attachment['remote_service']}'" : "interconnect_attachment['remote_service']" -%> } + +end + +describe google_compute_interconnect_attachment(interconnectAttachment: <%= doc_generation ? 
"' #{interconnect_attachment['interconnectAttachment']}'":"interconnect_attachment['interconnectAttachment']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{interconnect_attachment['region']}'":"interconnect_attachment['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachment_attributes.erb b/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachment_attributes.erb new file mode 100644 index 000000000..e4bba1e51 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachment_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + interconnect_attachment = input('interconnect_attachment', value: <%= JSON.pretty_generate(grab_attributes(pwd)['interconnect_attachment']) -%>, description: 'interconnect_attachment description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachments.erb b/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachments.erb new file mode 100644 index 000000000..00bbf18c7 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_interconnect_attachment/google_compute_interconnect_attachments.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% interconnect_attachment = grab_attributes(pwd)['interconnect_attachment'] -%> + describe google_compute_interconnect_attachments(project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{interconnect_attachment['region']}'":"interconnect_attachment['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_license/google_compute_license.erb b/mmv1/templates/inspec/examples/google_compute_license/google_compute_license.erb new file mode 100644 index 000000000..dcd7b0a4a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_license/google_compute_license.erb @@ -0,0 +1,17 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% license = grab_attributes(pwd)['license'] -%> +describe google_compute_v1_license(name: <%= doc_generation ? "' #{license['name']}'":"license['name']" -%>, project: <%= gcp_project_id -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{license['kind']}'" : "license['kind']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{license['name']}'" : "license['name']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{license['id']}'" : "license['id']" -%> } + its('license_code') { should cmp <%= doc_generation ? "'#{license['license_code']}'" : "license['license_code']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{license['creation_timestamp']}'" : "license['creation_timestamp']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{license['description']}'" : "license['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? 
"'#{license['self_link']}'" : "license['self_link']" -%> } + +end + +describe google_compute_v1_license(name: "does_not_exist", project: <%= gcp_project_id -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_license/google_compute_license_attributes.erb b/mmv1/templates/inspec/examples/google_compute_license/google_compute_license_attributes.erb new file mode 100644 index 000000000..31bf9967d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_license/google_compute_license_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + license = input('license', value: <%= JSON.pretty_generate(grab_attributes(pwd)['license']) -%>, description: 'license description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_license/google_compute_licenses.erb b/mmv1/templates/inspec/examples/google_compute_license/google_compute_licenses.erb new file mode 100644 index 000000000..689f6df19 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_license/google_compute_licenses.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% license = grab_attributes(pwd)['license'] -%> + describe google_compute_v1_licenses(project: <%= gcp_project_id -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_image.erb b/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_image.erb new file mode 100644 index 000000000..6c3fffd08 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_image.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<%
machine_image = grab_attributes(pwd)['machine_image'] -%> +describe google_compute_v1_machine_image(machineImage: <%= doc_generation ? "' #{machine_image['machineImage']}'":"machine_image['machineImage']" -%>, project: <%= gcp_project_id -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{machine_image['kind']}'" : "machine_image['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{machine_image['id']}'" : "machine_image['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{machine_image['creation_timestamp']}'" : "machine_image['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{machine_image['name']}'" : "machine_image['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{machine_image['description']}'" : "machine_image['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{machine_image['self_link']}'" : "machine_image['self_link']" -%> } + its('source_instance') { should cmp <%= doc_generation ? "'#{machine_image['source_instance']}'" : "machine_image['source_instance']" -%> } + its('status') { should cmp <%= doc_generation ? "'#{machine_image['status']}'" : "machine_image['status']" -%> } + its('total_storage_bytes') { should cmp <%= doc_generation ? "'#{machine_image['total_storage_bytes']}'" : "machine_image['total_storage_bytes']" -%> } + +end + +describe google_compute_v1_machine_image(machineImage: <%= doc_generation ? 
"' #{machine_image['machineImage']}'":"machine_image['machineImage']" -%>, project: <%= gcp_project_id -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_image_attributes.erb b/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_image_attributes.erb new file mode 100644 index 000000000..09604cd28 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_image_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + machine_image = input('machine_image', value: <%= JSON.pretty_generate(grab_attributes(pwd)['machine_image']) -%>, description: 'machine_image description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_images.erb b/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_images.erb new file mode 100644 index 000000000..d171d00dc --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_machine_image/google_compute_machine_images.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% machine_image = grab_attributes(pwd)['machine_image'] -%> + describe google_compute_v1_machine_images(project: <%= gcp_project_id -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_type.erb b/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_type.erb new file mode 100644 index 000000000..de851a125 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_type.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = 
"#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% machine_type = grab_attributes(pwd)['machine_type'] -%> +describe google_compute_machine_type(machineType: <%= doc_generation ? "' #{machine_type['machineType']}'":"machine_type['machineType']" -%>, project: <%= gcp_project_id -%>, zone: <%= doc_generation ? "' #{machine_type['zone']}'":"machine_type['zone']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{machine_type['kind']}'" : "machine_type['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{machine_type['id']}'" : "machine_type['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{machine_type['creation_timestamp']}'" : "machine_type['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{machine_type['name']}'" : "machine_type['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{machine_type['description']}'" : "machine_type['description']" -%> } + its('maximum_persistent_disks_size_gb') { should cmp <%= doc_generation ? "'#{machine_type['maximum_persistent_disks_size_gb']}'" : "machine_type['maximum_persistent_disks_size_gb']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{machine_type['zone']}'" : "machine_type['zone']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{machine_type['self_link']}'" : "machine_type['self_link']" -%> } + +end + +describe google_compute_machine_type(machineType: <%= doc_generation ? "' #{machine_type['machineType']}'":"machine_type['machineType']" -%>, project: <%= gcp_project_id -%>, zone: <%= doc_generation ? 
"' #{machine_type['zone']}'":"machine_type['zone']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_type_attributes.erb b/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_type_attributes.erb new file mode 100644 index 000000000..fbfc88526 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_type_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + machine_type = input('machine_type', value: <%= JSON.pretty_generate(grab_attributes(pwd)['machine_type']) -%>, description: 'machine_type description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_types.erb b/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_types.erb new file mode 100644 index 000000000..5394aa03e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_machine_type/google_compute_machine_types.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% machine_type = grab_attributes(pwd)['machine_type'] -%> + describe google_compute_machine_types(project: <%= gcp_project_id -%>, zone: <%= doc_generation ? 
"' #{machine_type['zone']}'":"machine_type['zone']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachment.erb b/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachment.erb new file mode 100644 index 000000000..dc3bd7f79 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachment.erb @@ -0,0 +1,21 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% network_attachment = grab_attributes(pwd)['network_attachment'] -%> +describe google_compute_v1_network_attachment(networkAttachment: <%= doc_generation ? "' #{network_attachment['networkAttachment']}'":"network_attachment['networkAttachment']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{network_attachment['region']}'":"network_attachment['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{network_attachment['kind']}'" : "network_attachment['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{network_attachment['id']}'" : "network_attachment['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{network_attachment['creation_timestamp']}'" : "network_attachment['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{network_attachment['name']}'" : "network_attachment['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{network_attachment['description']}'" : "network_attachment['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{network_attachment['self_link']}'" : "network_attachment['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? 
"'#{network_attachment['self_link_with_id']}'" : "network_attachment['self_link_with_id']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{network_attachment['region']}'" : "network_attachment['region']" -%> } + its('connection_preference') { should cmp <%= doc_generation ? "'#{network_attachment['connection_preference']}'" : "network_attachment['connection_preference']" -%> } + its('fingerprint') { should cmp <%= doc_generation ? "'#{network_attachment['fingerprint']}'" : "network_attachment['fingerprint']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{network_attachment['network']}'" : "network_attachment['network']" -%> } + +end + +describe google_compute_v1_network_attachment(networkAttachment: <%= doc_generation ? "' #{network_attachment['networkAttachment']}'":"network_attachment['networkAttachment']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{network_attachment['region']}'":"network_attachment['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachment_attributes.erb b/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachment_attributes.erb new file mode 100644 index 000000000..439f2f76b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachment_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + network_attachment = input('network_attachment', value: <%= JSON.pretty_generate(grab_attributes(pwd)['network_attachment']) -%>, description: 'network_attachment description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachments.erb 
b/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachments.erb new file mode 100644 index 000000000..cd4660fc5 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_network_attachment/google_compute_network_attachments.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% network_attachment = grab_attributes(pwd)['network_attachment'] -%> + describe google_compute_v1_network_attachments(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{network_attachment['region']}'":"network_attachment['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_service.erb b/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_service.erb new file mode 100644 index 000000000..b136a6bc9 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_service.erb @@ -0,0 +1,20 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% network_edge_security_service = grab_attributes(pwd)['network_edge_security_service'] -%> +describe google_compute_v1_network_edge_security_service(networkEdgeSecurityService: <%= doc_generation ? "' #{network_edge_security_service['networkEdgeSecurityService']}'":"network_edge_security_service['networkEdgeSecurityService']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{network_edge_security_service['region']}'":"network_edge_security_service['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{network_edge_security_service['kind']}'" : "network_edge_security_service['kind']" -%> } + its('id') { should cmp <%= doc_generation ? 
"'#{network_edge_security_service['id']}'" : "network_edge_security_service['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{network_edge_security_service['creation_timestamp']}'" : "network_edge_security_service['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{network_edge_security_service['name']}'" : "network_edge_security_service['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{network_edge_security_service['description']}'" : "network_edge_security_service['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{network_edge_security_service['self_link']}'" : "network_edge_security_service['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{network_edge_security_service['self_link_with_id']}'" : "network_edge_security_service['self_link_with_id']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{network_edge_security_service['region']}'" : "network_edge_security_service['region']" -%> } + its('fingerprint') { should cmp <%= doc_generation ? "'#{network_edge_security_service['fingerprint']}'" : "network_edge_security_service['fingerprint']" -%> } + its('security_policy') { should cmp <%= doc_generation ? "'#{network_edge_security_service['security_policy']}'" : "network_edge_security_service['security_policy']" -%> } + +end + +describe google_compute_v1_network_edge_security_service(networkEdgeSecurityService: <%= doc_generation ? "' #{network_edge_security_service['networkEdgeSecurityService']}'":"network_edge_security_service['networkEdgeSecurityService']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{network_edge_security_service['region']}'":"network_edge_security_service['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_service_attributes.erb b/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_service_attributes.erb new file mode 100644 index 000000000..a9f1f8cbe --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_service_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + network_edge_security_service = input('network_edge_security_service', value: <%= JSON.pretty_generate(grab_attributes(pwd)['network_edge_security_service']) -%>, description: 'network_edge_security_service description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_services.erb b/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_services.erb new file mode 100644 index 000000000..69e5bf578 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_network_edge_security_service/google_compute_network_edge_security_services.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% network_edge_security_service = grab_attributes(pwd)['network_edge_security_service'] -%> + describe google_compute_v1_network_edge_security_services(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{network_edge_security_service['region']}'":"network_edge_security_service['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_type.erb
b/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_type.erb new file mode 100644 index 000000000..0873d2c51 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_type.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% node_type = grab_attributes(pwd)['node_type'] -%> +describe google_compute_v1_node_type(nodeType: <%= doc_generation ? "' #{node_type['nodeType']}'":"node_type['nodeType']" -%>, project: <%= gcp_project_id -%>, zone: <%= doc_generation ? "' #{node_type['zone']}'":"node_type['zone']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{node_type['kind']}'" : "node_type['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{node_type['id']}'" : "node_type['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{node_type['creation_timestamp']}'" : "node_type['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{node_type['name']}'" : "node_type['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{node_type['description']}'" : "node_type['description']" -%> } + its('cpu_platform') { should cmp <%= doc_generation ? "'#{node_type['cpu_platform']}'" : "node_type['cpu_platform']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{node_type['zone']}'" : "node_type['zone']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{node_type['self_link']}'" : "node_type['self_link']" -%> } + +end + +describe google_compute_v1_node_type(nodeType: <%= doc_generation ? "' #{node_type['nodeType']}'":"node_type['nodeType']" -%>, project: <%= gcp_project_id -%>, zone: <%= doc_generation ? 
"' #{node_type['zone']}'":"node_type['zone']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_type_attributes.erb b/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_type_attributes.erb new file mode 100644 index 000000000..027af66b5 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_type_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + node_type = input('node_type', value: <%= JSON.pretty_generate(grab_attributes(pwd)['node_type']) -%>, description: 'node_type description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_types.erb b/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_types.erb new file mode 100644 index 000000000..7450ea306 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_node_type/google_compute_node_types.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% node_type = grab_attributes(pwd)['node_type'] -%> + describe google_compute_v1_node_types(project: <%= gcp_project_id -%>, zone: <%= doc_generation ? 
"' #{node_type['zone']}'":"node_type['zone']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirroring.erb b/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirroring.erb new file mode 100644 index 000000000..f16368b38 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirroring.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% packet_mirroring = grab_attributes(pwd)['packet_mirroring'] -%> +describe google_compute_packet_mirroring(packetMirroring: <%= doc_generation ? "' #{packet_mirroring['packetMirroring']}'":"packet_mirroring['packetMirroring']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{packet_mirroring['region']}'":"packet_mirroring['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{packet_mirroring['kind']}'" : "packet_mirroring['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{packet_mirroring['id']}'" : "packet_mirroring['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{packet_mirroring['creation_timestamp']}'" : "packet_mirroring['creation_timestamp']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{packet_mirroring['self_link']}'" : "packet_mirroring['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{packet_mirroring['self_link_with_id']}'" : "packet_mirroring['self_link_with_id']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{packet_mirroring['name']}'" : "packet_mirroring['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{packet_mirroring['description']}'" : "packet_mirroring['description']" -%> } + its('region') { should cmp <%= doc_generation ? 
"'#{packet_mirroring['region']}'" : "packet_mirroring['region']" -%> } + its('enable') { should cmp <%= doc_generation ? "'#{packet_mirroring['enable']}'" : "packet_mirroring['enable']" -%> } + +end + +describe google_compute_packet_mirroring(packetMirroring: <%= doc_generation ? "' #{packet_mirroring['packetMirroring']}'":"packet_mirroring['packetMirroring']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{packet_mirroring['region']}'":"packet_mirroring['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirroring_attributes.erb b/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirroring_attributes.erb new file mode 100644 index 000000000..8bffe6720 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirroring_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + packet_mirroring = input('packet_mirroring', value: <%= JSON.pretty_generate(grab_attributes(pwd)['packet_mirroring']) -%>, description: 'packet_mirroring description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirrorings.erb b/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirrorings.erb new file mode 100644 index 000000000..c174cafb6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_packet_mirroring/google_compute_packet_mirrorings.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% packet_mirroring = grab_attributes(pwd)['packet_mirroring'] -%> + describe google_compute_packet_mirrorings(project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{packet_mirroring['region']}'":"packet_mirroring['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefix.erb b/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefix.erb new file mode 100644 index 000000000..6ab007e8c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefix.erb @@ -0,0 +1,24 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% public_advertised_prefix = grab_attributes(pwd)['public_advertised_prefix'] -%> +describe google_compute_public_advertised_prefix(project: <%= gcp_project_id -%>, publicAdvertisedPrefix: <%= doc_generation ? "' #{public_advertised_prefix['publicAdvertisedPrefix']}'":"public_advertised_prefix['publicAdvertisedPrefix']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['kind']}'" : "public_advertised_prefix['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['id']}'" : "public_advertised_prefix['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['creation_timestamp']}'" : "public_advertised_prefix['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['name']}'" : "public_advertised_prefix['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['description']}'" : "public_advertised_prefix['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['self_link']}'" : "public_advertised_prefix['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? 
"'#{public_advertised_prefix['self_link_with_id']}'" : "public_advertised_prefix['self_link_with_id']" -%> } + its('ip_cidr_range') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['ip_cidr_range']}'" : "public_advertised_prefix['ip_cidr_range']" -%> } + its('dns_verification_ip') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['dns_verification_ip']}'" : "public_advertised_prefix['dns_verification_ip']" -%> } + its('shared_secret') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['shared_secret']}'" : "public_advertised_prefix['shared_secret']" -%> } + its('status') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['status']}'" : "public_advertised_prefix['status']" -%> } + its('pdp_scope') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['pdp_scope']}'" : "public_advertised_prefix['pdp_scope']" -%> } + its('fingerprint') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['fingerprint']}'" : "public_advertised_prefix['fingerprint']" -%> } + its('byoip_api_version') { should cmp <%= doc_generation ? "'#{public_advertised_prefix['byoip_api_version']}'" : "public_advertised_prefix['byoip_api_version']" -%> } + +end + +describe google_compute_public_advertised_prefix(project: <%= gcp_project_id -%>, publicAdvertisedPrefix: <%= doc_generation ? 
"' #{public_advertised_prefix['publicAdvertisedPrefix']}'":"public_advertised_prefix['publicAdvertisedPrefix']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefix_attributes.erb b/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefix_attributes.erb new file mode 100644 index 000000000..65049eb8b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefix_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + public_advertised_prefix = input('public_advertised_prefix', value: <%= JSON.pretty_generate(grab_attributes(pwd)['public_advertised_prefix']) -%>, description: 'public_advertised_prefix description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefixes.erb b/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefixes.erb new file mode 100644 index 000000000..e0be7444c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_public_advertised_prefix/google_compute_public_advertised_prefixes.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% public_advertised_prefix = grab_attributes(pwd)['public_advertised_prefix'] -%> + describe google_compute_public_advertised_prefixes(project: <%= gcp_project_id -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscaler.erb 
b/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscaler.erb new file mode 100644 index 000000000..dd12dca7d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscaler.erb @@ -0,0 +1,21 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% region_autoscaler = grab_attributes(pwd)['region_autoscaler'] -%> +describe google_compute_region_autoscaler(name: <%= doc_generation ? "' #{region_autoscaler['name']}'":"region_autoscaler['name']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_autoscaler['region']}'":"region_autoscaler['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{region_autoscaler['kind']}'" : "region_autoscaler['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{region_autoscaler['id']}'" : "region_autoscaler['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{region_autoscaler['creation_timestamp']}'" : "region_autoscaler['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{region_autoscaler['name']}'" : "region_autoscaler['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{region_autoscaler['description']}'" : "region_autoscaler['description']" -%> } + its('target') { should cmp <%= doc_generation ? "'#{region_autoscaler['target']}'" : "region_autoscaler['target']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{region_autoscaler['zone']}'" : "region_autoscaler['zone']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{region_autoscaler['region']}'" : "region_autoscaler['region']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{region_autoscaler['self_link']}'" : "region_autoscaler['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? 
"'#{region_autoscaler['self_link_with_id']}'" : "region_autoscaler['self_link_with_id']" -%> } + its('status') { should cmp <%= doc_generation ? "'#{region_autoscaler['status']}'" : "region_autoscaler['status']" -%> } + +end + +describe google_compute_region_autoscaler(autoscaler: <%= doc_generation ? "' #{region_autoscaler['autoscaler']}'":"region_autoscaler['autoscaler']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_autoscaler['region']}'":"region_autoscaler['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscaler_attributes.erb b/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscaler_attributes.erb new file mode 100644 index 000000000..2b5b3469e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscaler_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + region_autoscaler = input('region_autoscaler', value: <%= JSON.pretty_generate(grab_attributes(pwd)['region_autoscaler']) -%>, description: 'region_autoscaler description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscalers.erb b/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscalers.erb new file mode 100644 index 000000000..636bc67f5 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_autoscaler/google_compute_region_autoscalers.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% region_autoscaler = grab_attributes(pwd)['region_autoscaler'] -%> + describe google_compute_region_autoscalers(project: <%= gcp_project_id 
-%>, region: <%= doc_generation ? "' #{region_autoscaler['region']}'":"region_autoscaler['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_type.erb b/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_type.erb new file mode 100644 index 000000000..abd5491ff --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_type.erb @@ -0,0 +1,20 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% region_disk_type = grab_attributes(pwd)['region_disk_type'] -%> +describe google_compute_region_disk_type(diskType: <%= doc_generation ? "' #{region_disk_type['diskType']}'":"region_disk_type['diskType']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_disk_type['region']}'":"region_disk_type['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{region_disk_type['kind']}'" : "region_disk_type['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{region_disk_type['id']}'" : "region_disk_type['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{region_disk_type['creation_timestamp']}'" : "region_disk_type['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{region_disk_type['name']}'" : "region_disk_type['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{region_disk_type['description']}'" : "region_disk_type['description']" -%> } + its('valid_disk_size') { should cmp <%= doc_generation ? "'#{region_disk_type['valid_disk_size']}'" : "region_disk_type['valid_disk_size']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{region_disk_type['zone']}'" : "region_disk_type['zone']" -%> } + its('self_link') { should cmp <%= doc_generation ? 
"'#{region_disk_type['self_link']}'" : "region_disk_type['self_link']" -%> } + its('default_disk_size_gb') { should cmp <%= doc_generation ? "'#{region_disk_type['default_disk_size_gb']}'" : "region_disk_type['default_disk_size_gb']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{region_disk_type['region']}'" : "region_disk_type['region']" -%> } + +end + +describe google_compute_region_disk_type(diskType: <%= doc_generation ? "' #{region_disk_type['diskType']}'":"region_disk_type['diskType']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_disk_type['region']}'":"region_disk_type['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_type_attributes.erb b/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_type_attributes.erb new file mode 100644 index 000000000..1ec34d856 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_type_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + region_disk_type = input('region_disk_type', value: <%= JSON.pretty_generate(grab_attributes(pwd)['region_disk_type']) -%>, description: 'region_disk_type description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_types.erb b/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_types.erb new file mode 100644 index 000000000..637fb68e1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_disk_type/google_compute_region_disk_types.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% region_disk_type = 
grab_attributes(pwd)['region_disk_type'] -%> + describe google_compute_region_disk_types(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_disk_type['region']}'":"region_disk_type['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_group.erb b/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_group.erb new file mode 100644 index 000000000..5c9a3a084 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_group.erb @@ -0,0 +1,22 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% region_network_endpoint_group = grab_attributes(pwd)['region_network_endpoint_group'] -%> +describe google_compute_region_network_endpoint_group(networkEndpointGroup: <%= doc_generation ? "' #{region_network_endpoint_group['networkEndpointGroup']}'":"region_network_endpoint_group['networkEndpointGroup']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_network_endpoint_group['region']}'":"region_network_endpoint_group['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['kind']}'" : "region_network_endpoint_group['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['id']}'" : "region_network_endpoint_group['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['creation_timestamp']}'" : "region_network_endpoint_group['creation_timestamp']" -%> } + its('self_link') { should cmp <%= doc_generation ? 
"'#{region_network_endpoint_group['self_link']}'" : "region_network_endpoint_group['self_link']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['name']}'" : "region_network_endpoint_group['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['description']}'" : "region_network_endpoint_group['description']" -%> } + its('network_endpoint_type') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['network_endpoint_type']}'" : "region_network_endpoint_group['network_endpoint_type']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['region']}'" : "region_network_endpoint_group['region']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['zone']}'" : "region_network_endpoint_group['zone']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['network']}'" : "region_network_endpoint_group['network']" -%> } + its('subnetwork') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['subnetwork']}'" : "region_network_endpoint_group['subnetwork']" -%> } + its('psc_target_service') { should cmp <%= doc_generation ? "'#{region_network_endpoint_group['psc_target_service']}'" : "region_network_endpoint_group['psc_target_service']" -%> } + +end + +describe google_compute_region_network_endpoint_group(networkEndpointGroup: <%= doc_generation ? "' #{region_network_endpoint_group['networkEndpointGroup']}'":"region_network_endpoint_group['networkEndpointGroup']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{region_network_endpoint_group['region']}'":"region_network_endpoint_group['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_group_attributes.erb b/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_group_attributes.erb new file mode 100644 index 000000000..198e2ddaf --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_group_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + region_network_endpoint_group = input('region_network_endpoint_group', value: <%= JSON.pretty_generate(grab_attributes(pwd)['region_network_endpoint_group']) -%>, description: 'region_network_endpoint_group description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_groups.erb b/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_groups.erb new file mode 100644 index 000000000..bade16f7a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_network_endpoint_group/google_compute_region_network_endpoint_groups.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% region_network_endpoint_group = grab_attributes(pwd)['region_network_endpoint_group'] -%> + describe google_compute_region_network_endpoint_groups(project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{region_network_endpoint_group['region']}'":"region_network_endpoint_group['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoint.erb b/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoint.erb new file mode 100644 index 000000000..da4db3606 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoint.erb @@ -0,0 +1,17 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% region_notification_endpoint = grab_attributes(pwd)['region_notification_endpoint'] -%> +describe google_compute_region_notification_endpoint(notificationEndpoint: <%= doc_generation ? "' #{region_notification_endpoint['notificationEndpoint']}'":"region_notification_endpoint['notificationEndpoint']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_notification_endpoint['region']}'":"region_notification_endpoint['region']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{region_notification_endpoint['kind']}'" : "region_notification_endpoint['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{region_notification_endpoint['id']}'" : "region_notification_endpoint['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{region_notification_endpoint['creation_timestamp']}'" : "region_notification_endpoint['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{region_notification_endpoint['name']}'" : "region_notification_endpoint['name']" -%> } + its('description') { should cmp <%= doc_generation ? 
"'#{region_notification_endpoint['description']}'" : "region_notification_endpoint['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{region_notification_endpoint['self_link']}'" : "region_notification_endpoint['self_link']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{region_notification_endpoint['region']}'" : "region_notification_endpoint['region']" -%> } + +end + +describe google_compute_region_notification_endpoint(notificationEndpoint: <%= doc_generation ? "' #{region_notification_endpoint['notificationEndpoint']}'":"region_notification_endpoint['notificationEndpoint']" -%>, project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_notification_endpoint['region']}'":"region_notification_endpoint['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoint_attributes.erb b/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoint_attributes.erb new file mode 100644 index 000000000..c51574efd --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoint_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + region_notification_endpoint = input('region_notification_endpoint', value: <%= JSON.pretty_generate(grab_attributes(pwd)['region_notification_endpoint']) -%>, description: 'region_notification_endpoint description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoints.erb 
b/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoints.erb new file mode 100644 index 000000000..d162631d3 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_notification_endpoint/google_compute_region_notification_endpoints.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% region_notification_endpoint = grab_attributes(pwd)['region_notification_endpoint'] -%> + describe google_compute_region_notification_endpoints(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_notification_endpoint['region']}'":"region_notification_endpoint['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policies.erb b/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policies.erb new file mode 100644 index 000000000..06f03e7c1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% region_security_policy = grab_attributes(pwd)['region_security_policy'] -%> + describe google_compute_region_security_policies(project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{region_security_policy['region']}'":"region_security_policy['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policy.erb b/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policy.erb new file mode 100644 index 000000000..927a27174 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policy.erb @@ -0,0 +1,23 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% region_security_policy = grab_attributes(pwd)['region_security_policy'] -%> +describe google_compute_region_security_policy(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_security_policy['region']}'":"region_security_policy['region']" -%>, securityPolicy: <%= doc_generation ? "' #{region_security_policy['securityPolicy']}'":"region_security_policy['securityPolicy']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{region_security_policy['kind']}'" : "region_security_policy['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{region_security_policy['id']}'" : "region_security_policy['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{region_security_policy['creation_timestamp']}'" : "region_security_policy['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{region_security_policy['name']}'" : "region_security_policy['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{region_security_policy['description']}'" : "region_security_policy['description']" -%> } + its('fingerprint') { should cmp <%= doc_generation ? "'#{region_security_policy['fingerprint']}'" : "region_security_policy['fingerprint']" -%> } + its('self_link') { should cmp <%= doc_generation ? 
"'#{region_security_policy['self_link']}'" : "region_security_policy['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{region_security_policy['self_link_with_id']}'" : "region_security_policy['self_link_with_id']" -%> } + its('type') { should cmp <%= doc_generation ? "'#{region_security_policy['type']}'" : "region_security_policy['type']" -%> } + its('label_fingerprint') { should cmp <%= doc_generation ? "'#{region_security_policy['label_fingerprint']}'" : "region_security_policy['label_fingerprint']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{region_security_policy['display_name']}'" : "region_security_policy['display_name']" -%> } + its('parent') { should cmp <%= doc_generation ? "'#{region_security_policy['parent']}'" : "region_security_policy['parent']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{region_security_policy['region']}'" : "region_security_policy['region']" -%> } + +end + +describe google_compute_region_security_policy(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_security_policy['region']}'":"region_security_policy['region']" -%>, securityPolicy: <%= doc_generation ? 
"' #{region_security_policy['securityPolicy']}'":"region_security_policy['securityPolicy']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policy_attributes.erb b/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policy_attributes.erb new file mode 100644 index 000000000..3cc59193b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_security_policy/google_compute_region_security_policy_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + region_security_policy = input('region_security_policy', value: <%= JSON.pretty_generate(grab_attributes(pwd)['region_security_policy']) -%>, description: 'region_security_policy description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policies.erb b/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policies.erb new file mode 100644 index 000000000..839a52d4f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% region_ssl_policy = grab_attributes(pwd)['region_ssl_policy'] -%> + describe google_compute_region_ssl_policies(project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{region_ssl_policy['region']}'":"region_ssl_policy['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policy.erb b/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policy.erb new file mode 100644 index 000000000..795780e95 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policy.erb @@ -0,0 +1,21 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% region_ssl_policy = grab_attributes(pwd)['region_ssl_policy'] -%> +describe google_compute_region_ssl_policy(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_ssl_policy['region']}'":"region_ssl_policy['region']" -%>, sslPolicy: <%= doc_generation ? "' #{region_ssl_policy['sslPolicy']}'":"region_ssl_policy['sslPolicy']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{region_ssl_policy['kind']}'" : "region_ssl_policy['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{region_ssl_policy['id']}'" : "region_ssl_policy['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{region_ssl_policy['creation_timestamp']}'" : "region_ssl_policy['creation_timestamp']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{region_ssl_policy['self_link']}'" : "region_ssl_policy['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{region_ssl_policy['self_link_with_id']}'" : "region_ssl_policy['self_link_with_id']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{region_ssl_policy['name']}'" : "region_ssl_policy['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{region_ssl_policy['description']}'" : "region_ssl_policy['description']" -%> } + its('profile') { should cmp <%= doc_generation ? 
"'#{region_ssl_policy['profile']}'" : "region_ssl_policy['profile']" -%> } + its('min_tls_version') { should cmp <%= doc_generation ? "'#{region_ssl_policy['min_tls_version']}'" : "region_ssl_policy['min_tls_version']" -%> } + its('fingerprint') { should cmp <%= doc_generation ? "'#{region_ssl_policy['fingerprint']}'" : "region_ssl_policy['fingerprint']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{region_ssl_policy['region']}'" : "region_ssl_policy['region']" -%> } + +end + +describe google_compute_region_ssl_policy(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{region_ssl_policy['region']}'":"region_ssl_policy['region']" -%>, sslPolicy: <%= doc_generation ? "' #{region_ssl_policy['sslPolicy']}'":"region_ssl_policy['sslPolicy']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policy_attributes.erb b/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policy_attributes.erb new file mode 100644 index 000000000..c7aa3f684 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_region_ssl_policy/google_compute_region_ssl_policy_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + region_ssl_policy = input('region_ssl_policy', value: <%= JSON.pretty_generate(grab_attributes(pwd)['region_ssl_policy']) -%>, description: 'region_ssl_policy description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservation.erb b/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservation.erb new file mode 100644 index 000000000..65f8dc529 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservation.erb @@ -0,0 +1,21 @@ +<% 
gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% reservation = grab_attributes(pwd)['reservation'] -%> +describe google_compute_reservation(project: <%= gcp_project_id -%>, name: <%= doc_generation ? "' #{reservation['name']}'":"reservation['name']" -%>, zone: <%= doc_generation ? "' #{reservation['zone']}'":"reservation['zone']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{reservation['kind']}'" : "reservation['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{reservation['id']}'" : "reservation['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{reservation['creation_timestamp']}'" : "reservation['creation_timestamp']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{reservation['self_link']}'" : "reservation['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{reservation['self_link_with_id']}'" : "reservation['self_link_with_id']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{reservation['zone']}'" : "reservation['zone']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{reservation['description']}'" : "reservation['description']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{reservation['name']}'" : "reservation['name']" -%> } + its('commitment') { should cmp <%= doc_generation ? "'#{reservation['commitment']}'" : "reservation['commitment']" -%> } + its('status') { should cmp <%= doc_generation ? "'#{reservation['status']}'" : "reservation['status']" -%> } + its('delete_at_time') { should cmp <%= doc_generation ? "'#{reservation['delete_at_time']}'" : "reservation['delete_at_time']" -%> } + +end + +describe google_compute_reservation(project: <%= gcp_project_id -%>, name: "does_not_exist", zone: <%= doc_generation ?
"' #{reservation['zone']}'":"reservation['zone']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservation_attributes.erb b/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservation_attributes.erb new file mode 100644 index 000000000..d5c5db1a1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservation_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + reservation = input('reservation', value: <%= JSON.pretty_generate(grab_attributes(pwd)['reservation']) -%>, description: 'reservation description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservations.erb b/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservations.erb new file mode 100644 index 000000000..aca75113c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_reservation/google_compute_reservations.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% reservation = grab_attributes(pwd)['reservation'] -%> + describe google_compute_reservations(project: <%= gcp_project_id -%>, zone: <%= doc_generation ? 
"' #{reservation['zone']}'":"reservation['zone']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policies.erb b/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policies.erb new file mode 100644 index 000000000..eb2fafb7e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% resource_policy = grab_attributes(pwd)['resource_policy'] -%> + describe google_compute_resource_policies(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{resource_policy['region']}'":"resource_policy['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policy.erb b/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policy.erb new file mode 100644 index 000000000..94cdad833 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policy.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% resource_policy = grab_attributes(pwd)['resource_policy'] -%> +describe google_compute_resource_policy(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{resource_policy['region']}'":"resource_policy['region']" -%>, resourcePolicy: <%= doc_generation ? "' #{resource_policy['resourcePolicy']}'":"resource_policy['resourcePolicy']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{resource_policy['kind']}'" : "resource_policy['kind']" -%> } + its('id') { should cmp <%= doc_generation ? 
"'#{resource_policy['id']}'" : "resource_policy['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{resource_policy['creation_timestamp']}'" : "resource_policy['creation_timestamp']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{resource_policy['self_link']}'" : "resource_policy['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{resource_policy['self_link_with_id']}'" : "resource_policy['self_link_with_id']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{resource_policy['region']}'" : "resource_policy['region']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{resource_policy['description']}'" : "resource_policy['description']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{resource_policy['name']}'" : "resource_policy['name']" -%> } + its('status') { should cmp <%= doc_generation ? "'#{resource_policy['status']}'" : "resource_policy['status']" -%> } + +end + +describe google_compute_resource_policy(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{resource_policy['region']}'":"resource_policy['region']" -%>, resourcePolicy: <%= doc_generation ? 
"' #{resource_policy['resourcePolicy']}'":"resource_policy['resourcePolicy']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policy_attributes.erb b/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policy_attributes.erb new file mode 100644 index 000000000..64d44c202 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_resource_policy/google_compute_resource_policy_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + resource_policy = input('resource_policy', value: <%= JSON.pretty_generate(grab_attributes(pwd)['resource_policy']) -%>, description: 'resource_policy description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxies.erb b/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxies.erb new file mode 100644 index 000000000..5144acbbd --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% target_grpc_proxy = grab_attributes(pwd)['target_grpc_proxy'] -%> + describe google_compute_target_grpc_proxies(project: <%= gcp_project_id -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxy.erb b/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxy.erb new file mode 100644 index 000000000..2a5126d4a --- /dev/null +++ 
b/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxy.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% target_grpc_proxy = grab_attributes(pwd)['target_grpc_proxy'] -%> +describe google_compute_target_grpc_proxy(project: <%= gcp_project_id -%>, targetGrpcProxy: <%= doc_generation ? "' #{target_grpc_proxy['targetGrpcProxy']}'":"target_grpc_proxy['targetGrpcProxy']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['kind']}'" : "target_grpc_proxy['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['id']}'" : "target_grpc_proxy['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['creation_timestamp']}'" : "target_grpc_proxy['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['name']}'" : "target_grpc_proxy['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['description']}'" : "target_grpc_proxy['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['self_link']}'" : "target_grpc_proxy['self_link']" -%> } + its('self_link_with_id') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['self_link_with_id']}'" : "target_grpc_proxy['self_link_with_id']" -%> } + its('url_map') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['url_map']}'" : "target_grpc_proxy['url_map']" -%> } + its('fingerprint') { should cmp <%= doc_generation ? "'#{target_grpc_proxy['fingerprint']}'" : "target_grpc_proxy['fingerprint']" -%> } + +end + +describe google_compute_target_grpc_proxy(project: <%= gcp_project_id -%>, targetGrpcProxy: <%= doc_generation ? 
"' #{target_grpc_proxy['targetGrpcProxy']}'":"target_grpc_proxy['targetGrpcProxy']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxy_attributes.erb b/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxy_attributes.erb new file mode 100644 index 000000000..acb9e5f2f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_target_grpc_proxy/google_compute_target_grpc_proxy_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + target_grpc_proxy = input('target_grpc_proxy', value: <%= JSON.pretty_generate(grab_attributes(pwd)['target_grpc_proxy']) -%>, description: 'target_grpc_proxy description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxies.erb b/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxies.erb new file mode 100644 index 000000000..e8708d2c2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% target_ssl_proxy = grab_attributes(pwd)['target_ssl_proxy'] -%> + describe google_compute_target_ssl_proxies(project: <%= gcp_project_id -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxy.erb b/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxy.erb new file mode 100644 index 000000000..d9a3a256a --- /dev/null +++ 
b/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxy.erb @@ -0,0 +1,20 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% target_ssl_proxy = grab_attributes(pwd)['target_ssl_proxy'] -%> +describe google_compute_target_ssl_proxy(project: <%= gcp_project_id -%>, targetSslProxy: <%= doc_generation ? "' #{target_ssl_proxy['targetSslProxy']}'":"target_ssl_proxy['targetSslProxy']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['kind']}'" : "target_ssl_proxy['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['id']}'" : "target_ssl_proxy['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['creation_timestamp']}'" : "target_ssl_proxy['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['name']}'" : "target_ssl_proxy['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['description']}'" : "target_ssl_proxy['description']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['self_link']}'" : "target_ssl_proxy['self_link']" -%> } + its('service') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['service']}'" : "target_ssl_proxy['service']" -%> } + its('certificate_map') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['certificate_map']}'" : "target_ssl_proxy['certificate_map']" -%> } + its('proxy_header') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['proxy_header']}'" : "target_ssl_proxy['proxy_header']" -%> } + its('ssl_policy') { should cmp <%= doc_generation ? "'#{target_ssl_proxy['ssl_policy']}'" : "target_ssl_proxy['ssl_policy']" -%> } + +end + +describe google_compute_target_ssl_proxy(project: <%= gcp_project_id -%>, targetSslProxy: <%= doc_generation ? 
"' #{target_ssl_proxy['targetSslProxy']}'":"target_ssl_proxy['targetSslProxy']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxy_attributes.erb b/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxy_attributes.erb new file mode 100644 index 000000000..99fdd4883 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_target_ssl_proxy/google_compute_target_ssl_proxy_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + target_ssl_proxy = input('target_ssl_proxy', value: <%= JSON.pretty_generate(grab_attributes(pwd)['target_ssl_proxy']) -%>, description: 'target_ssl_proxy description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateway.erb b/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateway.erb new file mode 100644 index 000000000..4bf826f8e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateway.erb @@ -0,0 +1,20 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% target_vpn_gateway = grab_attributes(pwd)['target_vpn_gateway'] -%> +describe google_compute_target_vpn_gateway(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{target_vpn_gateway['region']}'":"target_vpn_gateway['region']" -%>, targetVpnGateway: <%= doc_generation ? "' #{target_vpn_gateway['targetVpnGateway']}'":"target_vpn_gateway['targetVpnGateway']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['kind']}'" : "target_vpn_gateway['kind']" -%> } + its('id') { should cmp <%= doc_generation ? 
"'#{target_vpn_gateway['id']}'" : "target_vpn_gateway['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['creation_timestamp']}'" : "target_vpn_gateway['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['name']}'" : "target_vpn_gateway['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['description']}'" : "target_vpn_gateway['description']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['region']}'" : "target_vpn_gateway['region']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['network']}'" : "target_vpn_gateway['network']" -%> } + its('status') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['status']}'" : "target_vpn_gateway['status']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['self_link']}'" : "target_vpn_gateway['self_link']" -%> } + its('label_fingerprint') { should cmp <%= doc_generation ? "'#{target_vpn_gateway['label_fingerprint']}'" : "target_vpn_gateway['label_fingerprint']" -%> } + +end + +describe google_compute_target_vpn_gateway(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{target_vpn_gateway['region']}'":"target_vpn_gateway['region']" -%>, targetVpnGateway: <%= doc_generation ? 
"' #{target_vpn_gateway['targetVpnGateway']}'":"target_vpn_gateway['targetVpnGateway']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateway_attributes.erb b/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateway_attributes.erb new file mode 100644 index 000000000..543932fda --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateway_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + target_vpn_gateway = input('target_vpn_gateway', value: <%= JSON.pretty_generate(grab_attributes(pwd)['target_vpn_gateway']) -%>, description: 'target_vpn_gateway description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateways.erb b/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateways.erb new file mode 100644 index 000000000..7edf443e6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_target_vpn_gateway/google_compute_target_vpn_gateways.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% target_vpn_gateway = grab_attributes(pwd)['target_vpn_gateway'] -%> + describe google_compute_target_vpn_gateways(project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{target_vpn_gateway['region']}'":"target_vpn_gateway['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateway.erb b/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateway.erb new file mode 100644 index 000000000..43711dff7 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateway.erb @@ -0,0 +1,20 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% vpn_gateway = grab_attributes(pwd)['vpn_gateway'] -%> +describe google_compute_vpn_gateway(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{vpn_gateway['region']}'":"vpn_gateway['region']" -%>, vpnGateway: <%= doc_generation ? "' #{vpn_gateway['vpnGateway']}'":"vpn_gateway['vpnGateway']" -%>) do + it { should exist } + its('kind') { should cmp <%= doc_generation ? "'#{vpn_gateway['kind']}'" : "vpn_gateway['kind']" -%> } + its('id') { should cmp <%= doc_generation ? "'#{vpn_gateway['id']}'" : "vpn_gateway['id']" -%> } + its('creation_timestamp') { should cmp <%= doc_generation ? "'#{vpn_gateway['creation_timestamp']}'" : "vpn_gateway['creation_timestamp']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{vpn_gateway['name']}'" : "vpn_gateway['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{vpn_gateway['description']}'" : "vpn_gateway['description']" -%> } + its('region') { should cmp <%= doc_generation ? "'#{vpn_gateway['region']}'" : "vpn_gateway['region']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{vpn_gateway['network']}'" : "vpn_gateway['network']" -%> } + its('self_link') { should cmp <%= doc_generation ? "'#{vpn_gateway['self_link']}'" : "vpn_gateway['self_link']" -%> } + its('label_fingerprint') { should cmp <%= doc_generation ? 
"'#{vpn_gateway['label_fingerprint']}'" : "vpn_gateway['label_fingerprint']" -%> } + its('stack_type') { should cmp <%= doc_generation ? "'#{vpn_gateway['stack_type']}'" : "vpn_gateway['stack_type']" -%> } + +end + +describe google_compute_vpn_gateway(project: <%= gcp_project_id -%>, region: <%= doc_generation ? "' #{vpn_gateway['region']}'":"vpn_gateway['region']" -%>, vpnGateway: <%= doc_generation ? "' #{vpn_gateway['vpnGateway']}'":"vpn_gateway['vpnGateway']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateway_attributes.erb b/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateway_attributes.erb new file mode 100644 index 000000000..d7b7ba471 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateway_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + vpn_gateway = input('vpn_gateway', value: <%= JSON.pretty_generate(grab_attributes(pwd)['vpn_gateway']) -%>, description: 'vpn_gateway description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateways.erb b/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateways.erb new file mode 100644 index 000000000..971cf634e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_compute_vpn_gateway/google_compute_vpn_gateways.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% vpn_gateway = grab_attributes(pwd)['vpn_gateway'] -%> + describe google_compute_vpn_gateways(project: <%= gcp_project_id -%>, region: <%= doc_generation ? 
"' #{vpn_gateway['region']}'":"vpn_gateway['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instance.erb b/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instance.erb new file mode 100644 index 000000000..135343afb --- /dev/null +++ b/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instance.erb @@ -0,0 +1,29 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% instance = grab_attributes(pwd)['instance'] -%> +describe google_data_fusion_instance(name: <%= doc_generation ? "' #{instance['name']}'":"instance['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{instance['name']}'" : "instance['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{instance['description']}'" : "instance['description']" -%> } + its('type') { should cmp <%= doc_generation ? "'#{instance['type']}'" : "instance['type']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{instance['create_time']}'" : "instance['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{instance['update_time']}'" : "instance['update_time']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{instance['state']}'" : "instance['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? "'#{instance['state_message']}'" : "instance['state_message']" -%> } + its('service_endpoint') { should cmp <%= doc_generation ? "'#{instance['service_endpoint']}'" : "instance['service_endpoint']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{instance['zone']}'" : "instance['zone']" -%> } + its('version') { should cmp <%= doc_generation ? "'#{instance['version']}'" : "instance['version']" -%> } + its('service_account') { should cmp <%= doc_generation ? 
"'#{instance['service_account']}'" : "instance['service_account']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{instance['display_name']}'" : "instance['display_name']" -%> } + its('api_endpoint') { should cmp <%= doc_generation ? "'#{instance['api_endpoint']}'" : "instance['api_endpoint']" -%> } + its('gcs_bucket') { should cmp <%= doc_generation ? "'#{instance['gcs_bucket']}'" : "instance['gcs_bucket']" -%> } + its('p4service_account') { should cmp <%= doc_generation ? "'#{instance['p4service_account']}'" : "instance['p4service_account']" -%> } + its('tenant_project_id') { should cmp <%= doc_generation ? "'#{instance['tenant_project_id']}'" : "instance['tenant_project_id']" -%> } + its('dataproc_service_account') { should cmp <%= doc_generation ? "'#{instance['dataproc_service_account']}'" : "instance['dataproc_service_account']" -%> } + its('workforce_identity_service_endpoint') { should cmp <%= doc_generation ? "'#{instance['workforce_identity_service_endpoint']}'" : "instance['workforce_identity_service_endpoint']" -%> } + its('patch_revision') { should cmp <%= doc_generation ? 
"'#{instance['patch_revision']}'" : "instance['patch_revision']" -%> } + +end + +describe google_data_fusion_instance(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instance_attributes.erb b/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instance_attributes.erb new file mode 100644 index 000000000..c6dbbcebf --- /dev/null +++ b/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instance_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + instance = input('instance', value: <%= JSON.pretty_generate(grab_attributes(pwd)['instance']) -%>, description: 'instance description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instances.erb b/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instances.erb new file mode 100644 index 000000000..9248c82a1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_data_fusion_instance/google_data_fusion_instances.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% instance = grab_attributes(pwd)['instance'] -%> + describe google_data_fusion_instances(parent: <%= doc_generation ? 
"' #{instance['parent']}'":"instance['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_job.erb b/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_job.erb new file mode 100644 index 000000000..8f2cf341e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_job.erb @@ -0,0 +1,25 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_job = grab_attributes(pwd)['project_location_job'] -%> +describe google_dataflow_project_location_job(jobId: <%= doc_generation ? "' #{project_location_job['jobId']}'":"project_location_job['jobId']" -%>, location: <%= doc_generation ? "' #{project_location_job['location']}'":"project_location_job['location']" -%>, projectId: <%= doc_generation ? "' #{project_location_job['projectId']}'":"project_location_job['projectId']" -%>) do + it { should exist } + its('id') { should cmp <%= doc_generation ? "'#{project_location_job['id']}'" : "project_location_job['id']" -%> } + its('project_id') { should cmp <%= doc_generation ? "'#{project_location_job['project_id']}'" : "project_location_job['project_id']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{project_location_job['name']}'" : "project_location_job['name']" -%> } + its('type') { should cmp <%= doc_generation ? "'#{project_location_job['type']}'" : "project_location_job['type']" -%> } + its('steps_location') { should cmp <%= doc_generation ? "'#{project_location_job['steps_location']}'" : "project_location_job['steps_location']" -%> } + its('current_state') { should cmp <%= doc_generation ? "'#{project_location_job['current_state']}'" : "project_location_job['current_state']" -%> } + its('current_state_time') { should cmp <%= doc_generation ? 
"'#{project_location_job['current_state_time']}'" : "project_location_job['current_state_time']" -%> } + its('requested_state') { should cmp <%= doc_generation ? "'#{project_location_job['requested_state']}'" : "project_location_job['requested_state']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{project_location_job['create_time']}'" : "project_location_job['create_time']" -%> } + its('replace_job_id') { should cmp <%= doc_generation ? "'#{project_location_job['replace_job_id']}'" : "project_location_job['replace_job_id']" -%> } + its('client_request_id') { should cmp <%= doc_generation ? "'#{project_location_job['client_request_id']}'" : "project_location_job['client_request_id']" -%> } + its('replaced_by_job_id') { should cmp <%= doc_generation ? "'#{project_location_job['replaced_by_job_id']}'" : "project_location_job['replaced_by_job_id']" -%> } + its('location') { should cmp <%= doc_generation ? "'#{project_location_job['location']}'" : "project_location_job['location']" -%> } + its('start_time') { should cmp <%= doc_generation ? "'#{project_location_job['start_time']}'" : "project_location_job['start_time']" -%> } + its('created_from_snapshot_id') { should cmp <%= doc_generation ? "'#{project_location_job['created_from_snapshot_id']}'" : "project_location_job['created_from_snapshot_id']" -%> } + +end + +describe google_dataflow_project_location_job(jobId: <%= doc_generation ? "' #{project_location_job['jobId']}'":"project_location_job['jobId']" -%>, location: <%= doc_generation ? "' #{project_location_job['location']}'":"project_location_job['location']" -%>, projectId: <%= doc_generation ? 
"' #{project_location_job['projectId']}'":"project_location_job['projectId']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_job_attributes.erb b/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_job_attributes.erb new file mode 100644 index 000000000..4c508c321 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_job = input('project_location_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_job']) -%>, description: 'project_location_job description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_jobs.erb b/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_jobs.erb new file mode 100644 index 000000000..7ea86cf63 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataflow_project_location_job/google_dataflow_project_location_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_job = grab_attributes(pwd)['project_location_job'] -%> + describe google_dataflow_project_location_jobs(location: <%= doc_generation ? "' #{project_location_job['location']}'":"project_location_job['location']" -%>, projectId: <%= doc_generation ? 
"' #{project_location_job['projectId']}'":"project_location_job['projectId']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instance.erb b/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instance.erb new file mode 100644 index 000000000..df03e2c5b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instance.erb @@ -0,0 +1,29 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% instance = grab_attributes(pwd)['instance'] -%> +describe google_datafusion_instance(name: <%= doc_generation ? "' #{instance['name']}'":"instance['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{instance['name']}'" : "instance['name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{instance['description']}'" : "instance['description']" -%> } + its('type') { should cmp <%= doc_generation ? "'#{instance['type']}'" : "instance['type']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{instance['create_time']}'" : "instance['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{instance['update_time']}'" : "instance['update_time']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{instance['state']}'" : "instance['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? "'#{instance['state_message']}'" : "instance['state_message']" -%> } + its('service_endpoint') { should cmp <%= doc_generation ? "'#{instance['service_endpoint']}'" : "instance['service_endpoint']" -%> } + its('zone') { should cmp <%= doc_generation ? "'#{instance['zone']}'" : "instance['zone']" -%> } + its('version') { should cmp <%= doc_generation ? "'#{instance['version']}'" : "instance['version']" -%> } + its('service_account') { should cmp <%= doc_generation ? 
"'#{instance['service_account']}'" : "instance['service_account']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{instance['display_name']}'" : "instance['display_name']" -%> } + its('api_endpoint') { should cmp <%= doc_generation ? "'#{instance['api_endpoint']}'" : "instance['api_endpoint']" -%> } + its('gcs_bucket') { should cmp <%= doc_generation ? "'#{instance['gcs_bucket']}'" : "instance['gcs_bucket']" -%> } + its('p4service_account') { should cmp <%= doc_generation ? "'#{instance['p4service_account']}'" : "instance['p4service_account']" -%> } + its('tenant_project_id') { should cmp <%= doc_generation ? "'#{instance['tenant_project_id']}'" : "instance['tenant_project_id']" -%> } + its('dataproc_service_account') { should cmp <%= doc_generation ? "'#{instance['dataproc_service_account']}'" : "instance['dataproc_service_account']" -%> } + its('workforce_identity_service_endpoint') { should cmp <%= doc_generation ? "'#{instance['workforce_identity_service_endpoint']}'" : "instance['workforce_identity_service_endpoint']" -%> } + its('patch_revision') { should cmp <%= doc_generation ? 
"'#{instance['patch_revision']}'" : "instance['patch_revision']" -%> } + +end + +describe google_datafusion_instance(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instance_attributes.erb b/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instance_attributes.erb new file mode 100644 index 000000000..c6dbbcebf --- /dev/null +++ b/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instance_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + instance = input('instance', value: <%= JSON.pretty_generate(grab_attributes(pwd)['instance']) -%>, description: 'instance description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instances.erb b/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instances.erb new file mode 100644 index 000000000..3ec4c45f1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_datafusion_instance/google_datafusion_instances.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% instance = grab_attributes(pwd)['instance'] -%> + describe google_datafusion_instances(parent: <%= doc_generation ? 
"' #{instance['parent']}'":"instance['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batch.erb b/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batch.erb new file mode 100644 index 000000000..701921341 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batch.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% batch = grab_attributes(pwd)['batch'] -%> +describe google_dataproc_batch(name: <%= doc_generation ? "' #{batch['name']}'":"batch['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{batch['name']}'" : "batch['name']" -%> } + its('uuid') { should cmp <%= doc_generation ? "'#{batch['uuid']}'" : "batch['uuid']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{batch['create_time']}'" : "batch['create_time']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{batch['state']}'" : "batch['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? "'#{batch['state_message']}'" : "batch['state_message']" -%> } + its('state_time') { should cmp <%= doc_generation ? "'#{batch['state_time']}'" : "batch['state_time']" -%> } + its('creator') { should cmp <%= doc_generation ? "'#{batch['creator']}'" : "batch['creator']" -%> } + its('operation') { should cmp <%= doc_generation ? 
"'#{batch['operation']}'" : "batch['operation']" -%> } + +end + +describe google_dataproc_batch(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batch_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batch_attributes.erb new file mode 100644 index 000000000..46aded1d6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batch_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + batch = input('batch', value: <%= JSON.pretty_generate(grab_attributes(pwd)['batch']) -%>, description: 'batch description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batches.erb b/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batches.erb new file mode 100644 index 000000000..51a4ab767 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_batch/google_dataproc_batches.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% batch = grab_attributes(pwd)['batch'] -%> + describe google_dataproc_batches(parent: <%= doc_generation ? 
"' #{batch['parent']}'":"batch['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federation.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federation.erb new file mode 100644 index 000000000..445483253 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federation.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_federation = grab_attributes(pwd)['project_location_federation'] -%> +describe google_dataproc_metastore_project_location_federation(name: <%= doc_generation ? "' #{project_location_federation['name']}'":"project_location_federation['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_location_federation['name']}'" : "project_location_federation['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{project_location_federation['create_time']}'" : "project_location_federation['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{project_location_federation['update_time']}'" : "project_location_federation['update_time']" -%> } + its('version') { should cmp <%= doc_generation ? "'#{project_location_federation['version']}'" : "project_location_federation['version']" -%> } + its('endpoint_uri') { should cmp <%= doc_generation ? "'#{project_location_federation['endpoint_uri']}'" : "project_location_federation['endpoint_uri']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{project_location_federation['state']}'" : "project_location_federation['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? 
"'#{project_location_federation['state_message']}'" : "project_location_federation['state_message']" -%> } + its('uid') { should cmp <%= doc_generation ? "'#{project_location_federation['uid']}'" : "project_location_federation['uid']" -%> } + +end + +describe google_dataproc_metastore_project_location_federation(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federation_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federation_attributes.erb new file mode 100644 index 000000000..3fbc86ebc --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federation_attributes.erb @@ -0,0 +1,2 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +project_location_federation = input('project_location_federation', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_federation']) -%>, description: 'project_location_federation description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federations.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federations.erb new file mode 100644 index 000000000..19093aa70 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_federation/google_dataproc_metastore_project_location_federations.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_federation 
= grab_attributes(pwd)['project_location_federation'] -%> + describe google_dataproc_metastore_project_location_federations(parent: <%= doc_generation ? "' #{project_location_federation['parent']}'":"project_location_federation['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_service.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_service.erb new file mode 100644 index 000000000..0e3798328 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_service.erb @@ -0,0 +1,22 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_service = grab_attributes(pwd)['project_location_service'] -%> +describe google_dataproc_metastore_project_location_service(name: <%= doc_generation ? "' #{project_location_service['name']}'":"project_location_service['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_location_service['name']}'" : "project_location_service['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{project_location_service['create_time']}'" : "project_location_service['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{project_location_service['update_time']}'" : "project_location_service['update_time']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{project_location_service['network']}'" : "project_location_service['network']" -%> } + its('endpoint_uri') { should cmp <%= doc_generation ? "'#{project_location_service['endpoint_uri']}'" : "project_location_service['endpoint_uri']" -%> } + its('state') { should cmp <%= doc_generation ? 
"'#{project_location_service['state']}'" : "project_location_service['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? "'#{project_location_service['state_message']}'" : "project_location_service['state_message']" -%> } + its('artifact_gcs_uri') { should cmp <%= doc_generation ? "'#{project_location_service['artifact_gcs_uri']}'" : "project_location_service['artifact_gcs_uri']" -%> } + its('tier') { should cmp <%= doc_generation ? "'#{project_location_service['tier']}'" : "project_location_service['tier']" -%> } + its('uid') { should cmp <%= doc_generation ? "'#{project_location_service['uid']}'" : "project_location_service['uid']" -%> } + its('release_channel') { should cmp <%= doc_generation ? "'#{project_location_service['release_channel']}'" : "project_location_service['release_channel']" -%> } + its('database_type') { should cmp <%= doc_generation ? "'#{project_location_service['database_type']}'" : "project_location_service['database_type']" -%> } + +end + +describe google_dataproc_metastore_project_location_service(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_service_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_service_attributes.erb new file mode 100644 index 000000000..7527a4f67 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_service_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_service = input('project_location_service', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_service']) -%>, description: 
'project_location_service description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_services.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_services.erb new file mode 100644 index 000000000..6ebdff3ed --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service/google_dataproc_metastore_project_location_services.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_service = grab_attributes(pwd)['project_location_service'] -%> + describe google_dataproc_metastore_project_location_services(parent: <%= doc_generation ? "' #{project_location_service['parent']}'":"project_location_service['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backup.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backup.erb new file mode 100644 index 000000000..49dd9296e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backup.erb @@ -0,0 +1,15 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_service_backup = grab_attributes(pwd)['project_location_service_backup'] -%> +describe google_dataproc_metastore_project_location_service_backup(name: <%= doc_generation ? "' #{project_location_service_backup['name']}'":"project_location_service_backup['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? 
"'#{project_location_service_backup['name']}'" : "project_location_service_backup['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{project_location_service_backup['create_time']}'" : "project_location_service_backup['create_time']" -%> } + its('end_time') { should cmp <%= doc_generation ? "'#{project_location_service_backup['end_time']}'" : "project_location_service_backup['end_time']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{project_location_service_backup['state']}'" : "project_location_service_backup['state']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{project_location_service_backup['description']}'" : "project_location_service_backup['description']" -%> } + +end + +describe google_dataproc_metastore_project_location_service_backup(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backup_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backup_attributes.erb new file mode 100644 index 000000000..1b2f53e58 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backup_attributes.erb @@ -0,0 +1,2 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +project_location_service_backup = input('project_location_service_backup', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_service_backup']) -%>, description: 'project_location_service_backup description') \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backups.erb b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backups.erb new file mode 100644 index 000000000..211a30316 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_metastore_project_location_service_backup/google_dataproc_metastore_project_location_service_backups.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_service_backup = grab_attributes(pwd)['project_location_service_backup'] -%> + describe google_dataproc_metastore_project_location_service_backups(parent: <%= doc_generation ? "' #{project_location_service_backup['parent']}'":"project_location_service_backup['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_session.erb b/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_session.erb new file mode 100644 index 000000000..feecc8053 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_session.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_session = grab_attributes(pwd)['project_location_session'] -%> +describe google_dataproc_project_location_session(name: <%= doc_generation ? "' #{project_location_session['name']}'":"project_location_session['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_location_session['name']}'" : "project_location_session['name']" -%> } + its('uuid') { should cmp <%= doc_generation ? 
"'#{project_location_session['uuid']}'" : "project_location_session['uuid']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{project_location_session['create_time']}'" : "project_location_session['create_time']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{project_location_session['state']}'" : "project_location_session['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? "'#{project_location_session['state_message']}'" : "project_location_session['state_message']" -%> } + its('state_time') { should cmp <%= doc_generation ? "'#{project_location_session['state_time']}'" : "project_location_session['state_time']" -%> } + its('creator') { should cmp <%= doc_generation ? "'#{project_location_session['creator']}'" : "project_location_session['creator']" -%> } + its('user') { should cmp <%= doc_generation ? "'#{project_location_session['user']}'" : "project_location_session['user']" -%> } + its('session_template') { should cmp <%= doc_generation ? 
"'#{project_location_session['session_template']}'" : "project_location_session['session_template']" -%> } + +end + +describe google_dataproc_project_location_session(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_session_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_session_attributes.erb new file mode 100644 index 000000000..fc9393330 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_session_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_session = input('project_location_session', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_session']) -%>, description: 'project_location_session description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_sessions.erb b/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_sessions.erb new file mode 100644 index 000000000..a1591178e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_location_session/google_dataproc_project_location_sessions.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_session = grab_attributes(pwd)['project_location_session'] -%> + describe google_dataproc_project_location_sessions(parent: <%= doc_generation ? 
"' #{project_location_session['parent']}'":"project_location_session['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_template.erb b/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_template.erb new file mode 100644 index 000000000..b0825754a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_template.erb @@ -0,0 +1,15 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_workflow_template = grab_attributes(pwd)['project_location_workflow_template'] -%> +describe google_dataproc_project_location_workflow_template(name: <%= doc_generation ? "' #{project_location_workflow_template['name']}'":"project_location_workflow_template['name']" -%>) do + it { should exist } + its('id') { should cmp <%= doc_generation ? "'#{project_location_workflow_template['id']}'" : "project_location_workflow_template['id']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{project_location_workflow_template['name']}'" : "project_location_workflow_template['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{project_location_workflow_template['create_time']}'" : "project_location_workflow_template['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{project_location_workflow_template['update_time']}'" : "project_location_workflow_template['update_time']" -%> } + its('dag_timeout') { should cmp <%= doc_generation ? 
"'#{project_location_workflow_template['dag_timeout']}'" : "project_location_workflow_template['dag_timeout']" -%> } + +end + +describe google_dataproc_project_location_workflow_template(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_template_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_template_attributes.erb new file mode 100644 index 000000000..b916686e6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_template_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_workflow_template = input('project_location_workflow_template', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_workflow_template']) -%>, description: 'project_location_workflow_template description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_templates.erb b/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_templates.erb new file mode 100644 index 000000000..4eeba5218 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_location_workflow_template/google_dataproc_project_location_workflow_templates.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_workflow_template = grab_attributes(pwd)['project_location_workflow_template'] -%> + describe 
google_dataproc_project_location_workflow_templates(parent: <%= doc_generation ? "' #{project_location_workflow_template['parent']}'":"project_location_workflow_template['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policies.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policies.erb new file mode 100644 index 000000000..ae2bf1fa0 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_region_autoscaling_policy = grab_attributes(pwd)['project_region_autoscaling_policy'] -%> + describe google_dataproc_project_region_autoscaling_policies(parent: <%= doc_generation ? "' #{project_region_autoscaling_policy['parent']}'":"project_region_autoscaling_policy['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policy.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policy.erb new file mode 100644 index 000000000..660710b1b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policy.erb @@ -0,0 +1,12 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_region_autoscaling_policy = grab_attributes(pwd)['project_region_autoscaling_policy'] -%> +describe google_dataproc_project_region_autoscaling_policy(name: <%= doc_generation ? 
"' #{project_region_autoscaling_policy['name']}'":"project_region_autoscaling_policy['name']" -%>) do + it { should exist } + its('id') { should cmp <%= doc_generation ? "'#{project_region_autoscaling_policy['id']}'" : "project_region_autoscaling_policy['id']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{project_region_autoscaling_policy['name']}'" : "project_region_autoscaling_policy['name']" -%> } + +end + +describe google_dataproc_project_region_autoscaling_policy(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policy_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policy_attributes.erb new file mode 100644 index 000000000..9405cf978 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_autoscaling_policy/google_dataproc_project_region_autoscaling_policy_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_region_autoscaling_policy = input('project_region_autoscaling_policy', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_region_autoscaling_policy']) -%>, description: 'project_region_autoscaling_policy description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_cluster.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_cluster.erb new file mode 100644 index 000000000..f7321c672 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_cluster.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = 
"#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_region_cluster = grab_attributes(pwd)['project_region_cluster'] -%> +describe google_dataproc_project_region_cluster(clusterName: <%= doc_generation ? "' #{project_region_cluster['clusterName']}'":"project_region_cluster['clusterName']" -%>, projectId: <%= doc_generation ? "' #{project_region_cluster['projectId']}'":"project_region_cluster['projectId']" -%>, region: <%= doc_generation ? "' #{project_region_cluster['region']}'":"project_region_cluster['region']" -%>) do + it { should exist } + +end + +describe google_dataproc_project_region_cluster(clusterName: <%= doc_generation ? "' #{project_region_cluster['clusterName']}'":"project_region_cluster['clusterName']" -%>, projectId: <%= doc_generation ? "' #{project_region_cluster['projectId']}'":"project_region_cluster['projectId']" -%>, region: <%= doc_generation ? "' #{project_region_cluster['region']}'":"project_region_cluster['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_cluster_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_cluster_attributes.erb new file mode 100644 index 000000000..eabebc0fd --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_cluster_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_region_cluster = input('project_region_cluster', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_region_cluster']) -%>, description: 'project_region_cluster description') \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_clusters.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_clusters.erb new file mode 100644 index 000000000..615efbb58 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_cluster/google_dataproc_project_region_clusters.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_region_cluster = grab_attributes(pwd)['project_region_cluster'] -%> + describe google_dataproc_project_region_clusters(projectId: <%= doc_generation ? "' #{project_region_cluster['projectId']}'":"project_region_cluster['projectId']" -%>, region: <%= doc_generation ? "' #{project_region_cluster['region']}'":"project_region_cluster['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_job.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_job.erb new file mode 100644 index 000000000..628038953 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_job.erb @@ -0,0 +1,13 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_region_job = grab_attributes(pwd)['project_region_job'] -%> +describe google_dataproc_project_region_job(jobId: <%= doc_generation ? "' #{project_region_job['jobId']}'":"project_region_job['jobId']" -%>, projectId: <%= doc_generation ? "' #{project_region_job['projectId']}'":"project_region_job['projectId']" -%>, region: <%= doc_generation ? "' #{project_region_job['region']}'":"project_region_job['region']" -%>) do + it { should exist } + its('driver_output_resource_uri') { should cmp <%= doc_generation ? 
"'#{project_region_job['driver_output_resource_uri']}'" : "project_region_job['driver_output_resource_uri']" -%> } + its('driver_control_files_uri') { should cmp <%= doc_generation ? "'#{project_region_job['driver_control_files_uri']}'" : "project_region_job['driver_control_files_uri']" -%> } + its('job_uuid') { should cmp <%= doc_generation ? "'#{project_region_job['job_uuid']}'" : "project_region_job['job_uuid']" -%> } + +end + +describe google_dataproc_project_region_job(jobId: <%= doc_generation ? "' #{project_region_job['jobId']}'":"project_region_job['jobId']" -%>, projectId: <%= doc_generation ? "' #{project_region_job['projectId']}'":"project_region_job['projectId']" -%>, region: <%= doc_generation ? "' #{project_region_job['region']}'":"project_region_job['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_job_attributes.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_job_attributes.erb new file mode 100644 index 000000000..2362c21b3 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_region_job = input('project_region_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_region_job']) -%>, description: 'project_region_job description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_jobs.erb b/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_jobs.erb new file mode 100644 index 000000000..824f2177b --- /dev/null +++ 
b/mmv1/templates/inspec/examples/google_dataproc_project_region_job/google_dataproc_project_region_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_region_job = grab_attributes(pwd)['project_region_job'] -%> + describe google_dataproc_project_region_jobs() do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dt.erb b/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dt.erb new file mode 100644 index 000000000..549f3a438 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dt.erb @@ -0,0 +1,11 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + +describe google_dlp_dt(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['deidentify_templates'][:location]}'" : "dlp['deidentify_templates'][:location]" -%>}", name: <%= doc_generation ? "'#{dlp['deidentify_templates'][:name]}'" : "dlp['deidentify_templates'][:name]" -%>) do + it { should exist } + its('display_name') { should cmp <%= doc_generation ? "'#{dlp['deidentify_templates'][:name]}'" : "dlp['deidentify_templates'][:name]" -%> } +end + +describe google_dlp_dt(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? 
"'#{dlp['deidentify_templates'][:location]}'" : "dlp['deidentify_templates'][location]" -%>}", name: 'nonexistent') do + it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dt_attributes.erb b/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dt_attributes.erb new file mode 100644 index 000000000..3738170a8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dt_attributes.erb @@ -0,0 +1,2 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +dlp = input('dlp', value: <%= JSON.pretty_generate(grab_attributes(pwd)['dlp']) -%>, description: 'DLP ') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dts.erb b/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dts.erb new file mode 100644 index 000000000..56d79cfb5 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_dt/google_dlp_dts.erb @@ -0,0 +1,8 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + + +describe google_dlp_dts(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['deidentify_templates'][:location]}'" : "dlp['deidentify_templates'][:location]" -%>}") do + it { should exist } + its('display_names') { should include <%= doc_generation ? 
"'#{dlp['deidentify_templates'][:name]}'" : "dlp['deidentify_templates'][:name]" -%> } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_template.erb b/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_template.erb new file mode 100644 index 000000000..b3125e965 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_template.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + +describe google_dlp_inspect_template(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}", name: <%= doc_generation ? "'#{dlp['name']}'" : "dlp['name']" -%>) do +it { should exist } +its('name') { should cmp <%= doc_generation ? "'#{dlp['name']}'" : "dlp['name']" -%> } +its('type') { should cmp <%= doc_generation ? "'#{dlp['type']}'" : "dlp['type']" -%> } +its('state') { should cmp <%= doc_generation ? "'#{dlp['state']}'" : "dlp['state']" -%> } +its('inspectDetails.requestedOptions.snapshotInspectTemplate') { should cmp <%= doc_generation ? "'#{dlp['inspectDetails']['requestedOptions']['snapshotInspectTemplate']}'" : "dlp['inspectDetails']['requestedOptions']['snapshotInspectTemplate']" -%> } +end + +describe google_dlp_inspect_template(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? 
"'#{dlp['location']}'" : "dlp['location']" -%>}", name: 'nonexistent') do +it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_template_attributes.erb b/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_template_attributes.erb new file mode 100644 index 000000000..3738170a8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_template_attributes.erb @@ -0,0 +1,2 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +dlp = input('dlp', value: <%= JSON.pretty_generate(grab_attributes(pwd)['dlp']) -%>, description: 'DLP ') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_templates.erb b/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_templates.erb new file mode 100644 index 000000000..53e1f9bc3 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_inspect_template/google_dlp_inspect_templates.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + + +describe google_dlp_inspect_templates(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}") do +it { should exist } +its('names') { should include <%= doc_generation ? "'#{dlp['name']}'" : "dlp['name']" -%> } +its('types') { should include <%= doc_generation ? "'#{dlp['type']}'" : "dlp['type']" -%> } +its('states') { should include <%= doc_generation ? 
"'#{dlp['state']}'" : "dlp['state']" -%> } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_job.erb b/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_job.erb new file mode 100644 index 000000000..1cfc34f75 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_job.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + +describe google_dlp_job(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}", name: <%= doc_generation ? "'#{dlp['name']}'" : "dlp['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{dlp['name']}'" : "dlp['name']" -%> } + its('type') { should cmp <%= doc_generation ? "'#{dlp['type']}'" : "dlp['type']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{dlp['state']}'" : "dlp['state']" -%> } + its('inspectDetails.requestedOptions.snapshotInspectTemplate') { should cmp <%= doc_generation ? "'#{dlp['inspectDetails']['requestedOptions']['snapshotInspectTemplate']}'" : "dlp['inspectDetails']['requestedOptions']['snapshotInspectTemplate']" -%> } +end + +describe google_dlp_job(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? 
"'#{dlp['location']}'" : "dlp['location']" -%>}", name: 'nonexistent') do + it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_job_attributes.erb b/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_job_attributes.erb new file mode 100644 index 000000000..3738170a8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_job_attributes.erb @@ -0,0 +1,2 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +dlp = input('dlp', value: <%= JSON.pretty_generate(grab_attributes(pwd)['dlp']) -%>, description: 'DLP ') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_jobs.erb b/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_jobs.erb new file mode 100644 index 000000000..0a8b4d15c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_job/google_dlp_jobs.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + + +describe google_dlp_jobs(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}") do + it { should exist } + its('names') { should include <%= doc_generation ? "'#{dlp['name']}'" : "dlp['name']" -%> } + its('types') { should include <%= doc_generation ? "'#{dlp['type']}'" : "dlp['type']" -%> } + its('states') { should include <%= doc_generation ? 
"'#{dlp['state']}'" : "dlp['state']" -%> } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_trigger.erb b/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_trigger.erb new file mode 100644 index 000000000..5926aaf1b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_trigger.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + +describe google_dlp_job_trigger(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}",name: <%= doc_generation ? "'#{dlp['job_trigger_name']}'" : "dlp['job_trigger_name']" -%>) do +it { should exist } +its('name') { should cmp <%= doc_generation ? "'#{dlp['job_trigger_name']}'" : "dlp['job_trigger_name']" -%> } +its('display_name') { should cmp <%= doc_generation ? "'#{dlp['job_trigger_display_name']}'" : "dlp['job_trigger_display_name']" -%> } +its('description') { should cmp <%= doc_generation ? "'#{dlp['job_trigger_description']}'" : "dlp['job_trigger_description']" -%> } +its('status') { should cmp <%= doc_generation ? 
"'#{dlp['job_trigger_status']}'" : "dlp['job_trigger_status']" -%> } +end + +describe google_dlp_job_trigger(parent: <%= gcp_project_id -%>, name: 'nonexistent') do +it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_trigger_attributes.erb b/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_trigger_attributes.erb new file mode 100644 index 000000000..3738170a8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_trigger_attributes.erb @@ -0,0 +1,2 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +dlp = input('dlp', value: <%= JSON.pretty_generate(grab_attributes(pwd)['dlp']) -%>, description: 'DLP ') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_triggers.erb b/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_triggers.erb new file mode 100644 index 000000000..128dd66a2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_job_trigger/google_dlp_job_triggers.erb @@ -0,0 +1,11 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + + +describe google_dlp_job_triggers(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}") do +it { should exist } +its('names') { should include <%= doc_generation ? "'#{dlp['job_trigger_name']}'" : "dlp['job_trigger_name']" -%> } +its('display_name') { should include <%= doc_generation ? "'#{dlp['job_trigger_display_name']}'" : "dlp['job_trigger_display_name']" -%> } +its('descriptions') { should include <%= doc_generation ? "'#{dlp['job_trigger_description']}'" : "dlp['job_trigger_description']" -%> } +its('status') { should include <%= doc_generation ? 
"'#{dlp['job_trigger_status']}'" : "dlp['job_trigger_status']" -%> } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_type.erb b/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_type.erb new file mode 100644 index 000000000..4402fb4e6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_type.erb @@ -0,0 +1,13 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + +describe google_dlp_stored_info_type(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}",name: <%= doc_generation ? "'#{dlp['stored_info_type_name']}'" : "dlp['stored_info_type_name']" -%>) do +it { should exist } +its('name') { should cmp <%= doc_generation ? "'#{dlp['name']}'" : "dlp['name']" -%> } +its('type') { should cmp <%= doc_generation ? "'#{dlp['type']}'" : "dlp['type']" -%> } +its('state') { should cmp <%= doc_generation ? "'#{dlp['state']}'" : "dlp['state']" -%> } +end + +describe google_dlp_stored_info_type(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? 
"'#{dlp['location']}'" : "dlp['location']" -%>}", name: 'nonexistent') do +it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_type_attributes.erb b/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_type_attributes.erb new file mode 100644 index 000000000..3738170a8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_type_attributes.erb @@ -0,0 +1,2 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +dlp = input('dlp', value: <%= JSON.pretty_generate(grab_attributes(pwd)['dlp']) -%>, description: 'DLP ') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_types.erb b/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_types.erb new file mode 100644 index 000000000..4112cbb0d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_dlp_stored_info_type/google_dlp_stored_info_types.erb @@ -0,0 +1,8 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% dlp = grab_attributes(pwd)['dlp'] -%> + + +describe google_dlp_stored_info_types(parent: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{dlp['location']}'" : "dlp['location']" -%>}") do +it { should exist } +its('names') { should include <%= doc_generation ? 
"'#{dlp['name']}'" : "dlp['name']" -%> } +end diff --git a/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_key.erb b/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_key.erb new file mode 100644 index 000000000..e5a7f55b1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_key.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_service_account_key = grab_attributes(pwd)['project_service_account_key'] -%> +describe google_iam_project_service_account_key(name: <%= doc_generation ? "' #{project_service_account_key['name']}'":"project_service_account_key['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_service_account_key['name']}'" : "project_service_account_key['name']" -%> } + its('private_key_type') { should cmp <%= doc_generation ? "'#{project_service_account_key['private_key_type']}'" : "project_service_account_key['private_key_type']" -%> } + its('key_algorithm') { should cmp <%= doc_generation ? "'#{project_service_account_key['key_algorithm']}'" : "project_service_account_key['key_algorithm']" -%> } + its('private_key_data') { should cmp <%= doc_generation ? "'#{project_service_account_key['private_key_data']}'" : "project_service_account_key['private_key_data']" -%> } + its('public_key_data') { should cmp <%= doc_generation ? "'#{project_service_account_key['public_key_data']}'" : "project_service_account_key['public_key_data']" -%> } + its('valid_after_time') { should cmp <%= doc_generation ? "'#{project_service_account_key['valid_after_time']}'" : "project_service_account_key['valid_after_time']" -%> } + its('valid_before_time') { should cmp <%= doc_generation ? 
"'#{project_service_account_key['valid_before_time']}'" : "project_service_account_key['valid_before_time']" -%> } + its('key_origin') { should cmp <%= doc_generation ? "'#{project_service_account_key['key_origin']}'" : "project_service_account_key['key_origin']" -%> } + its('key_type') { should cmp <%= doc_generation ? "'#{project_service_account_key['key_type']}'" : "project_service_account_key['key_type']" -%> } + +end + +describe google_iam_project_service_account_key(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_key_attributes.erb b/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_key_attributes.erb new file mode 100644 index 000000000..92cd0ab69 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_key_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_service_account_key = input('project_service_account_key', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_service_account_key']) -%>, description: 'project_service_account_key description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_keys.erb b/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_keys.erb new file mode 100644 index 000000000..a4f33409b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_iam_project_service_account_key/google_iam_project_service_account_keys.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_service_account_key = 
grab_attributes(pwd)['project_service_account_key'] -%> + describe google_iam_project_service_account_keys(name: <%= doc_generation ? "' #{project_service_account_key['name']}'":"project_service_account_key['name']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_job.erb b/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_job.erb new file mode 100644 index 000000000..e340880c9 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_job.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% gcp_location = "#{external_attribute(pwd, 'gcp_location', doc_generation)}" -%> +<% gcp_name = "#{external_attribute(pwd, 'gcp_name', doc_generation)}" -%> +describe google_kms_key_ring_import_job(project: <%= gcp_project_id -%>, location: <%= gcp_location -%>, name: <%= gcp_name -%>) do + it { should exist } +end + +describe google_kms_key_ring_import_job(project: <%= gcp_project_id -%>, location: 'nonexistent', name: 'nonexistent') do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_job_attributes.erb b/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_job_attributes.erb new file mode 100644 index 000000000..5f884323e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') +gcp_location = input(:gcp_location, value: '<%= external_attribute(pwd, 'gcp_location') -%>', description: 'GCP location') +gcp_name = input(:gcp_name, value: '<%= 
external_attribute(pwd, 'gcp_name') -%>', description: 'GCP Name') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_jobs.erb b/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_jobs.erb new file mode 100644 index 000000000..798f5ebb8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_kms_key_ring_import_job/google_kms_key_ring_import_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% gcp_location = "#{external_attribute(pwd, 'gcp_location', doc_generation)}" -%> +describe google_kms_key_ring_import_job(project: <%= gcp_project_id -%>, location: <%= gcp_location -%>) do + it { should exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federation.erb b/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federation.erb new file mode 100644 index 000000000..5d84350f4 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federation.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_federation = grab_attributes(pwd)['project_location_federation'] -%> +describe google_metastore_project_location_federation(name: <%= doc_generation ? "' #{project_location_federation['name']}'":"project_location_federation['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_location_federation['name']}'" : "project_location_federation['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? 
"'#{project_location_federation['create_time']}'" : "project_location_federation['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{project_location_federation['update_time']}'" : "project_location_federation['update_time']" -%> } + its('version') { should cmp <%= doc_generation ? "'#{project_location_federation['version']}'" : "project_location_federation['version']" -%> } + its('endpoint_uri') { should cmp <%= doc_generation ? "'#{project_location_federation['endpoint_uri']}'" : "project_location_federation['endpoint_uri']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{project_location_federation['state']}'" : "project_location_federation['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? "'#{project_location_federation['state_message']}'" : "project_location_federation['state_message']" -%> } + its('uid') { should cmp <%= doc_generation ? "'#{project_location_federation['uid']}'" : "project_location_federation['uid']" -%> } + +end + +describe google_metastore_project_location_federation(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federation_attributes.erb b/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federation_attributes.erb new file mode 100644 index 000000000..02a0d502b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federation_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_federation = input('project_location_federation', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_federation']) -%>, description: 
'project_location_federation description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federations.erb b/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federations.erb new file mode 100644 index 000000000..47563c889 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_metastore_project_location_federation/google_metastore_project_location_federations.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_federation = grab_attributes(pwd)['project_location_federation'] -%> + describe google_metastore_project_location_federations(parent: <%= doc_generation ? "' #{project_location_federation['parent']}'":"project_location_federation['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_service.erb b/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_service.erb new file mode 100644 index 000000000..34bb1eea0 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_service.erb @@ -0,0 +1,22 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_location_service = grab_attributes(pwd)['project_location_service'] -%> +describe google_metastore_project_location_service(name: <%= doc_generation ? "' #{project_location_service['name']}'":"project_location_service['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_location_service['name']}'" : "project_location_service['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? 
"'#{project_location_service['create_time']}'" : "project_location_service['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{project_location_service['update_time']}'" : "project_location_service['update_time']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{project_location_service['network']}'" : "project_location_service['network']" -%> } + its('endpoint_uri') { should cmp <%= doc_generation ? "'#{project_location_service['endpoint_uri']}'" : "project_location_service['endpoint_uri']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{project_location_service['state']}'" : "project_location_service['state']" -%> } + its('state_message') { should cmp <%= doc_generation ? "'#{project_location_service['state_message']}'" : "project_location_service['state_message']" -%> } + its('artifact_gcs_uri') { should cmp <%= doc_generation ? "'#{project_location_service['artifact_gcs_uri']}'" : "project_location_service['artifact_gcs_uri']" -%> } + its('tier') { should cmp <%= doc_generation ? "'#{project_location_service['tier']}'" : "project_location_service['tier']" -%> } + its('uid') { should cmp <%= doc_generation ? "'#{project_location_service['uid']}'" : "project_location_service['uid']" -%> } + its('release_channel') { should cmp <%= doc_generation ? "'#{project_location_service['release_channel']}'" : "project_location_service['release_channel']" -%> } + its('database_type') { should cmp <%= doc_generation ? 
"'#{project_location_service['database_type']}'" : "project_location_service['database_type']" -%> } + +end + +describe google_metastore_project_location_service(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_service_attributes.erb b/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_service_attributes.erb new file mode 100644 index 000000000..7527a4f67 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_service_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_location_service = input('project_location_service', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_location_service']) -%>, description: 'project_location_service description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_services.erb b/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_services.erb new file mode 100644 index 000000000..d1a112db2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_metastore_project_location_service/google_metastore_project_location_services.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_location_service = grab_attributes(pwd)['project_location_service'] -%> + describe google_metastore_project_location_services(parent: <%= doc_generation ? 
"' #{project_location_service['parent']}'":"project_location_service['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_group.erb b/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_group.erb new file mode 100644 index 000000000..9213ef574 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_group.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_group = grab_attributes(pwd)['project_group'] -%> +describe google_monitoring_project_group(name: <%= doc_generation ? "' #{project_group['name']}'":"project_group['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{project_group['name']}'" : "project_group['name']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{project_group['display_name']}'" : "project_group['display_name']" -%> } + its('parent_name') { should cmp <%= doc_generation ? "'#{project_group['parent_name']}'" : "project_group['parent_name']" -%> } + its('filter') { should cmp <%= doc_generation ? 
"'#{project_group['filter']}'" : "project_group['filter']" -%> } + +end + +describe google_monitoring_project_group(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_group_attributes.erb b/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_group_attributes.erb new file mode 100644 index 000000000..b0d3e05ad --- /dev/null +++ b/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_group_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_group = input('project_group', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_group']) -%>, description: 'project_group description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_groups.erb b/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_groups.erb new file mode 100644 index 000000000..4c81f2b9f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_monitoring_project_group/google_monitoring_project_groups.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_group = grab_attributes(pwd)['project_group'] -%> + describe google_monitoring_project_groups(name: <%= doc_generation ? 
"' #{project_group['name']}'":"project_group['name']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraint.erb b/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraint.erb new file mode 100644 index 000000000..e371a781e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraint.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% folder_constraint = grab_attributes(pwd)['folder_constraint'] -%> +describe google_orgpolicy_folder_constraint() do + it { should exist } + +end + +describe google_orgpolicy_folder_constraint() do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraint_attributes.erb b/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraint_attributes.erb new file mode 100644 index 000000000..034048e72 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraint_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + folder_constraint = input('folder_constraint', value: <%= JSON.pretty_generate(grab_attributes(pwd)['folder_constraint']) -%>, description: 'folder_constraint description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraints.erb b/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraints.erb new file mode 100644 index 000000000..6cc0994fa --- /dev/null +++ 
b/mmv1/templates/inspec/examples/google_orgpolicy_folder_constraint/google_orgpolicy_folder_constraints.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% folder_constraint = grab_attributes(pwd)['folder_constraint'] -%> + describe google_orgpolicy_folder_constraints(parent: <%= doc_generation ? "' #{folder_constraint['parent']}'":"folder_constraint['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policies.erb b/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policies.erb new file mode 100644 index 000000000..99e1af466 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% folder_policy = grab_attributes(pwd)['folder_policy'] -%> + describe google_orgpolicy_folder_policies(parent: <%= doc_generation ? "' #{folder_policy['parent']}'":"folder_policy['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policy.erb b/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policy.erb new file mode 100644 index 000000000..06fb2c861 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policy.erb @@ -0,0 +1,11 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% folder_policy = grab_attributes(pwd)['folder_policy'] -%> +describe google_orgpolicy_folder_policy(name: <%= doc_generation ? "' #{folder_policy['name']}'":"folder_policy['name']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? 
"'#{folder_policy['name']}'" : "folder_policy['name']" -%> } + +end + +describe google_orgpolicy_folder_policy(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policy_attributes.erb b/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policy_attributes.erb new file mode 100644 index 000000000..1e7ebbac2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_folder_policy/google_orgpolicy_folder_policy_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + folder_policy = input('folder_policy', value: <%= JSON.pretty_generate(grab_attributes(pwd)['folder_policy']) -%>, description: 'folder_policy description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraint.erb b/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraint.erb new file mode 100644 index 000000000..68a491740 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraint.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% organization_constraint = grab_attributes(pwd)['organization_constraint'] -%> +describe google_orgpolicy_organization_constraint() do + it { should exist } + +end + +describe google_orgpolicy_organization_constraint() do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraint_attributes.erb 
b/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraint_attributes.erb new file mode 100644 index 000000000..21007e639 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraint_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + organization_constraint = input('organization_constraint', value: <%= JSON.pretty_generate(grab_attributes(pwd)['organization_constraint']) -%>, description: 'organization_constraint description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraints.erb b/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraints.erb new file mode 100644 index 000000000..a3b5c635d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_organization_constraint/google_orgpolicy_organization_constraints.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% organization_constraint = grab_attributes(pwd)['organization_constraint'] -%> + describe google_orgpolicy_organization_constraints(parent: <%= doc_generation ? 
"' #{organization_constraint['parent']}'":"organization_constraint['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraint.erb b/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraint.erb new file mode 100644 index 000000000..ae944a8b8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraint.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_constraint = grab_attributes(pwd)['project_constraint'] -%> +describe google_orgpolicy_project_constraint() do + it { should exist } + +end + +describe google_orgpolicy_project_constraint() do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraint_attributes.erb b/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraint_attributes.erb new file mode 100644 index 000000000..fb06eec85 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraint_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_constraint = input('project_constraint', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_constraint']) -%>, description: 'project_constraint description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraints.erb b/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraints.erb new file mode 100644 index 
000000000..8b18c8c0d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_orgpolicy_project_constraint/google_orgpolicy_project_constraints.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_constraint = grab_attributes(pwd)['project_constraint'] -%> + describe google_orgpolicy_project_constraints(parent: <%= doc_generation ? "' #{project_constraint['parent']}'":"project_constraint['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_run_service/google_run_service.erb b/mmv1/templates/inspec/examples/google_run_service/google_run_service.erb new file mode 100644 index 000000000..e6e150cc6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_run_service/google_run_service.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% service = grab_attributes(pwd)['service'] -%> +describe google_run_service(name: <%= doc_generation ? 
"' #{service['name']}'":"service['name']" -%>) do + it { should exist } + +end + +describe google_run_service(name: "does_not_exit") do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_run_service/google_run_service_attributes.erb b/mmv1/templates/inspec/examples/google_run_service/google_run_service_attributes.erb new file mode 100644 index 000000000..032fa2e6b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_run_service/google_run_service_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + service = input('service', value: <%= JSON.pretty_generate(grab_attributes(pwd)['service']) -%>, description: 'service description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_run_service/google_run_services.erb b/mmv1/templates/inspec/examples/google_run_service/google_run_services.erb new file mode 100644 index 000000000..aee4c1c91 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_run_service/google_run_services.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% service = grab_attributes(pwd)['service'] -%> + describe google_run_services(parent: <%= doc_generation ? 
"' #{service['parent']}'":"service['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secret.erb b/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secret.erb new file mode 100644 index 000000000..a8be7f594 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secret.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_secret = grab_attributes(pwd)['project_secret'] -%> +describe google_secret_manager_secret(name: <%= doc_generation ? "' #{project_secret['name']}'":"project_secret['name']" -%>) do + it { should exist } + +end + +describe google_secret_manager_secret(name: "does_not_exit") do + it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secret_attributes.erb b/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secret_attributes.erb new file mode 100644 index 000000000..afaf93f91 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secret_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_secret = input('project_secret', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_secret']) -%>, description: 'project_secret description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secrets.erb b/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secrets.erb new file mode 100644 index 000000000..25960351d --- /dev/null +++ 
b/mmv1/templates/inspec/examples/google_secret_manager_secret/google_secret_manager_secrets.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_secret = grab_attributes(pwd)['project_secret'] -%> + describe google_secret_manager_secrets(parent: <%= doc_generation ? "' #{project_secret['parent']}'":"project_secret['parent']" -%>) do + it { should exist } + end diff --git a/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_version.erb b/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_version.erb new file mode 100644 index 000000000..a8be7f594 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_version.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% project_secret = grab_attributes(pwd)['project_secret'] -%> +describe google_secret_manager_secret(name: <%= doc_generation ? 
"' #{project_secret['name']}'":"project_secret['name']" -%>) do + it { should exist } + +end + +describe google_secret_manager_secret(name: "does_not_exit") do + it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_version_attributes.erb b/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_version_attributes.erb new file mode 100644 index 000000000..afaf93f91 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_version_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + project_secret = input('project_secret', value: <%= JSON.pretty_generate(grab_attributes(pwd)['project_secret']) -%>, description: 'project_secret description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_versions.erb b/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_versions.erb new file mode 100644 index 000000000..25960351d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_secret_manager_secret_version/google_secret_manager_secret_versions.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% project_secret = grab_attributes(pwd)['project_secret'] -%> + describe google_secret_manager_secrets(parent: <%= doc_generation ? 
"' #{project_secret['parent']}'":"project_secret['parent']" -%>) do + it { should exist } + end diff --git a/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connection.erb b/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connection.erb new file mode 100644 index 000000000..c3cbff890 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connection.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% service_connection = grab_attributes(pwd)['service_connection'] -%> +describe google_service_networking_service_connection() do + it { should exist } + +end + +describe google_service_networking_service_connection() do + it { should_not exist } +end diff --git a/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connection_attributes.erb b/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connection_attributes.erb new file mode 100644 index 000000000..097b6e347 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connection_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + service_connection = input('service_connection', value: <%= JSON.pretty_generate(grab_attributes(pwd)['service_connection']) -%>, description: 'service_connection description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connections.erb 
b/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connections.erb new file mode 100644 index 000000000..4d55c47ed --- /dev/null +++ b/mmv1/templates/inspec/examples/google_service_networking_service_connection/google_service_networking_service_connections.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% service_connection = grab_attributes(pwd)['service_connection'] -%> + describe google_servicenetworking_service_connections(parent: <%= doc_generation ? "' #{service_connection['parent']}'":"service_connection['parent']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_sql_database_instance/google_sql_database_instance.erb b/mmv1/templates/inspec/examples/google_sql_database_instance/google_sql_database_instance.erb index 728842daa..7656aec4c 100644 --- a/mmv1/templates/inspec/examples/google_sql_database_instance/google_sql_database_instance.erb +++ b/mmv1/templates/inspec/examples/google_sql_database_instance/google_sql_database_instance.erb @@ -2,7 +2,7 @@ <% gcp_location = "#{external_attribute(pwd, 'gcp_location', doc_generation)}" -%> <% gcp_db_instance_name = "#{external_attribute(pwd, 'gcp_db_instance_name', doc_generation)}" -%> -describe google_sql_database_instance(project: <%= gcp_project_id -%>, database: <%= gcp_db_instance_name -%>) do +describe google_sql_database_instance(project: <%= gcp_project_id -%>, instance: <%= gcp_db_instance_name -%>) do it { should exist } its('state') { should eq 'RUNNABLE' } its('backend_type') { should eq 'SECOND_GEN' } diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_job.erb b/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_job.erb new file mode 100644 index 000000000..aaf2804e6 --- /dev/null +++ 
b/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_job.erb @@ -0,0 +1,20 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% batch_prediction_job = grab_attributes(pwd)['batch_prediction_job'] -%> +describe google_vertex_ai_batch_prediction_job(name: "projects/#{gcp_project_id}/locations/#{batch_prediction_job['region']}/batchPredictionJobs/#{batch_prediction_job['name']}", region: <%= doc_generation ? "' #{batch_prediction_job['region']}'":"batch_prediction_job['region']" -%>) do + it { should exist } + its('create_time') { should cmp <%= doc_generation ? "'#{batch_prediction_job['create_time']}'" : "batch_prediction_job['create_time']" -%> } + its('model_version_id') { should cmp <%= doc_generation ? "'#{batch_prediction_job['model_version_id']}'" : "batch_prediction_job['model_version_id']" -%> } + its('end_time') { should cmp <%= doc_generation ? "'#{batch_prediction_job['end_time']}'" : "batch_prediction_job['end_time']" -%> } + its('start_time') { should cmp <%= doc_generation ? "'#{batch_prediction_job['start_time']}'" : "batch_prediction_job['start_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{batch_prediction_job['update_time']}'" : "batch_prediction_job['update_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{batch_prediction_job['name']}'" : "batch_prediction_job['name']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{batch_prediction_job['state']}'" : "batch_prediction_job['state']" -%> } + its('model') { should cmp <%= doc_generation ? "'#{batch_prediction_job['model']}'" : "batch_prediction_job['model']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{batch_prediction_job['display_name']}'" : "batch_prediction_job['display_name']" -%> } + its('service_account') { should cmp <%= doc_generation ? 
"'#{batch_prediction_job['service_account']}'" : "batch_prediction_job['service_account']" -%> } + +end + +describe google_vertex_ai_batch_prediction_job(name: "does_not_exit", region: <%= doc_generation ? "' #{batch_prediction_job['region']}'":"batch_prediction_job['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_job_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_job_attributes.erb new file mode 100644 index 000000000..5b438e988 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + batch_prediction_job = input('batch_prediction_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['batch_prediction_job']) -%>, description: 'batch_prediction_job description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_jobs.erb b/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_jobs.erb new file mode 100644 index 000000000..e685f1167 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_batch_prediction_job/google_vertex_ai_batch_prediction_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% batch_prediction_job = grab_attributes(pwd)['batch_prediction_job'] -%> +describe google_vertex_ai_batch_prediction_jobs(parent: "projects/#{gcp_project_id}/locations/#{batch_prediction_job['region']}", region: <%= doc_generation ? 
"' #{batch_prediction_job['region']}'":"batch_prediction_job['region']" -%>) do + it { should exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_job.erb b/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_job.erb new file mode 100644 index 000000000..342b2b4ff --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_job.erb @@ -0,0 +1,7 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% custom_job = grab_attributes(pwd)['custom_job'] -%> + +describe google_vertex_ai_custom_job(name: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{custom_job['region']}'" : "custom_job['region']" -%>}/customJobs/#{<%= doc_generation ? "'#{custom_job['job_id']}'" : "custom_job['job_id']" -%>}", region: <%= doc_generation ? "' #{custom_job['region']}'":"custom_job['region']" -%>) do + it { should exist } + its('display_name') { should cmp custom_job['name'] } +end diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_job_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_job_attributes.erb new file mode 100644 index 000000000..01c5ffc43 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + custom_job = input('custom_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['custom_job']) -%>, description: 'custom_job description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_jobs.erb 
b/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_jobs.erb new file mode 100644 index 000000000..17ba8a40f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_custom_job/google_vertex_ai_custom_jobs.erb @@ -0,0 +1,8 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% custom_job = grab_attributes(pwd)['custom_job'] -%> + + +describe google_vertex_ai_custom_jobs(name: "projects/#{<%= gcp_project_id -%>}/locations/#{<%= doc_generation ? "'#{custom_job['region']}'" : "custom_job['region']" -%>}", region: <%= doc_generation ? "' #{custom_job['region']}'":"custom_job['region']" -%>) do + it { should exist } + its ('display_names') { should include custom_job['name']} +end diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_dataset.erb b/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_dataset.erb new file mode 100644 index 000000000..7387f165a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_dataset.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% dataset = grab_attributes(pwd)['dataset'] -%> + describe google_vertex_ai_dataset(name: <%= doc_generation ? "' #{dataset['name']}'":"dataset['name']" -%>, region: <%= doc_generation ? 
"' #{dataset['region']}'":"dataset['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_dataset_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_dataset_attributes.erb new file mode 100644 index 000000000..c0bf6f8b7 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_dataset_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + dataset = input('dataset', value: <%= JSON.pretty_generate(grab_attributes(pwd)['dataset']) -%>, description: 'dataset description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_datasets.erb b/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_datasets.erb new file mode 100644 index 000000000..239b83df6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_dataset/google_vertex_ai_datasets.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% dataset = grab_attributes(pwd)['dataset'] -%> + describe google_vertex_ai_dataset(parent: <%= doc_generation ? "' #{dataset['parent']}'":"dataset['parent']" -%>, region: <%= doc_generation ? 
"' #{dataset['region']}'":"dataset['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_dataset_data_item_annotation/google_vertex_ai_dataset_data_item_annotation_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_dataset_data_item_annotation/google_vertex_ai_dataset_data_item_annotation_attributes.erb new file mode 100644 index 000000000..41078c849 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_dataset_data_item_annotation/google_vertex_ai_dataset_data_item_annotation_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + dataset_data_item_annotation = input('dataset_data_item_annotation', value: <%= JSON.pretty_generate(grab_attributes(pwd)['dataset_data_item_annotation']) -%>, description: 'dataset_data_item_annotation description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_dataset_data_item_annotation/google_vertex_ai_dataset_data_item_annotations.erb b/mmv1/templates/inspec/examples/google_vertex_ai_dataset_data_item_annotation/google_vertex_ai_dataset_data_item_annotations.erb new file mode 100644 index 000000000..19c9eb7bf --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_dataset_data_item_annotation/google_vertex_ai_dataset_data_item_annotations.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% dataset_data_item_annotation = grab_attributes(pwd)['dataset_data_item_annotation'] -%> + describe google_vertex_ai_dataset_data_item_annotations(parent: "projects/#{gcp_project_id}/locations/#{dataset_data_item_annotation['region']}/datasets/#{dataset_data_item_annotation['dataset']}/dataItems/#{dataset_data_item_annotation['dataItem']}", region: <%= doc_generation ? 
"' #{dataset_data_item_annotation['region']}'":"dataset_data_item_annotation['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_datasets_annotation_spec/google_vertex_ai_datasets_annotation_spec.erb b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_annotation_spec/google_vertex_ai_datasets_annotation_spec.erb new file mode 100644 index 000000000..79d0eef02 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_annotation_spec/google_vertex_ai_datasets_annotation_spec.erb @@ -0,0 +1,15 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% datasets_annotation_spec = grab_attributes(pwd)['datasets_annotation_spec'] -%> +describe google_vertex_ai_datasets_annotation_spec(name: "projects/#{gcp_project_id}/locations/#{datasets_annotation_spec['region']}/datasets/#{datasets_annotation_spec['dataset']}/annotationSpecs/#{datasets_annotation_spec['name']}", region: <%= doc_generation ? "' #{datasets_annotation_spec['region']}'":"datasets_annotation_spec['region']" -%>) do + it { should exist } + its('display_name') { should cmp <%= doc_generation ? "'#{datasets_annotation_spec['display_name']}'" : "datasets_annotation_spec['display_name']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{datasets_annotation_spec['name']}'" : "datasets_annotation_spec['name']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{datasets_annotation_spec['etag']}'" : "datasets_annotation_spec['etag']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{datasets_annotation_spec['create_time']}'" : "datasets_annotation_spec['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{datasets_annotation_spec['update_time']}'" : "datasets_annotation_spec['update_time']" -%> } + +end + +describe google_vertex_ai_datasets_annotation_spec(name: "does_not_exit", region: <%= doc_generation ? 
"' #{datasets_annotation_spec['region']}'":"datasets_annotation_spec['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_datasets_annotation_spec/google_vertex_ai_datasets_annotation_spec_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_annotation_spec/google_vertex_ai_datasets_annotation_spec_attributes.erb new file mode 100644 index 000000000..f0f47d7f0 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_annotation_spec/google_vertex_ai_datasets_annotation_spec_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + datasets_annotation_spec = input('datasets_annotation_spec', value: <%= JSON.pretty_generate(grab_attributes(pwd)['datasets_annotation_spec']) -%>, description: 'datasets_annotation_spec description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_item.erb b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_item.erb new file mode 100644 index 000000000..2b45f242c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_item.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% datasets_data_item = grab_attributes(pwd)['datasets_data_item'] -%> +describe google_vertex_ai_datasets_data_item() do + it { should exist } + its('update_time') { should cmp <%= doc_generation ? "'#{datasets_data_item['update_time']}'" : "datasets_data_item['update_time']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{datasets_data_item['etag']}'" : "datasets_data_item['etag']" -%> } + its('name') { should cmp <%= doc_generation ? 
"'#{datasets_data_item['name']}'" : "datasets_data_item['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{datasets_data_item['create_time']}'" : "datasets_data_item['create_time']" -%> } + +end + +describe google_vertex_ai_datasets_data_item() do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_item_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_item_attributes.erb new file mode 100644 index 000000000..0b5013bbf --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_item_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + datasets_data_item = input('datasets_data_item', value: <%= JSON.pretty_generate(grab_attributes(pwd)['datasets_data_item']) -%>, description: 'datasets_data_item description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_items.erb b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_items.erb new file mode 100644 index 000000000..3a68155b4 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_data_item/google_vertex_ai_datasets_data_items.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% datasets_data_item = grab_attributes(pwd)['datasets_data_item'] -%> + describe google_vertex_ai_datasets_data_items(parent: "projects/#{gcp_project_id}/locations/#{datasets_data_item['region']}/datasets/#{datasets_data_item['dataset']}", region: <%= doc_generation ? 
"' #{datasets_data_item['region']}'":"datasets_data_item['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_datasets_saved_query/google_vertex_ai_datasets_saved_queries.erb b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_saved_query/google_vertex_ai_datasets_saved_queries.erb new file mode 100644 index 000000000..5196e0d22 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_saved_query/google_vertex_ai_datasets_saved_queries.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% datasets_saved_query = grab_attributes(pwd)['datasets_saved_query'] -%> + describe google_vertex_ai_datasets_saved_queries(parent: "projects/#{gcp_project_id}/locations/#{datasets_saved_query['region']}/datasets/#{datasets_saved_query['dataset']}", region: <%= doc_generation ? "' #{datasets_saved_query['region']}'":"datasets_saved_query['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_datasets_saved_query/google_vertex_ai_datasets_saved_query_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_saved_query/google_vertex_ai_datasets_saved_query_attributes.erb new file mode 100644 index 000000000..00df1c37a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_datasets_saved_query/google_vertex_ai_datasets_saved_query_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + datasets_saved_query = input('datasets_saved_query', value: <%= JSON.pretty_generate(grab_attributes(pwd)['datasets_saved_query']) -%>, description: 'datasets_saved_query description') \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoint.erb b/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoint.erb new file mode 100644 index 000000000..cfa9f7869 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoint.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% endpoint = grab_attributes(pwd)['endpoint'] -%> +describe google_vertex_ai_endpoint(name: "projects/#{gcp_project_id}/locations/#{endpoint['region']}/endpoints/#{endpoint['name']}", region: <%= doc_generation ? "' #{endpoint['region']}'":"endpoint['region']" -%>) do +it { should exist } + its('update_time') { should cmp <%= doc_generation ? "'#{endpoint['update_time']}'" : "endpoint['update_time']" -%> } + its('model_deployment_monitoring_job') { should cmp <%= doc_generation ? "'#{endpoint['model_deployment_monitoring_job']}'" : "endpoint['model_deployment_monitoring_job']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{endpoint['description']}'" : "endpoint['description']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{endpoint['network']}'" : "endpoint['network']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{endpoint['display_name']}'" : "endpoint['display_name']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{endpoint['etag']}'" : "endpoint['etag']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{endpoint['create_time']}'" : "endpoint['create_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{endpoint['name']}'" : "endpoint['name']" -%> } + +end + +describe google_vertex_ai_endpoint(name: "does_not_exit", region: <%= doc_generation ? 
"'#{endpoint['region']}'":"endpoint['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoint_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoint_attributes.erb new file mode 100644 index 000000000..1b71417bc --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoint_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + endpoint = input('endpoint', value: <%= JSON.pretty_generate(grab_attributes(pwd)['endpoint']) -%>, description: 'endpoint description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoints.erb b/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoints.erb new file mode 100644 index 000000000..6e647dcba --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_endpoint/google_vertex_ai_endpoints.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% endpoint = grab_attributes(pwd)['endpoint'] -%> + describe google_vertex_ai_endpoints(parent: "projects/#{gcp_project_id}/locations/#{endpoint['region']}", region: <%= doc_generation ? 
"' #{endpoint['region']}'":"endpoint['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestore.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestore.erb new file mode 100644 index 000000000..c8ac7483a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestore.erb @@ -0,0 +1,15 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% featurestore = grab_attributes(pwd)['featurestore'] -%> +describe google_vertex_ai_featurestore(name: "projects/#{gcp_project_id}/locations/#{featurestore['region']}/featurestores/#{featurestore['name']}", region: <%= doc_generation ? "' #{featurestore['region']}'":"featurestore['region']" -%>) do + it { should exist } + its('state') { should cmp <%= doc_generation ? "'#{featurestore['state']}'" : "featurestore['state']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{featurestore['create_time']}'" : "featurestore['create_time']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{featurestore['etag']}'" : "featurestore['etag']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{featurestore['update_time']}'" : "featurestore['update_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{featurestore['name']}'" : "featurestore['name']" -%> } + +end + +describe google_vertex_ai_featurestore(name: "does_not_exit", region: <%= doc_generation ? 
"'#{featurestore['region']}'":"featurestore['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestore_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestore_attributes.erb new file mode 100644 index 000000000..09eba9c3b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestore_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + featurestore = input('featurestore', value: <%= JSON.pretty_generate(grab_attributes(pwd)['featurestore']) -%>, description: 'featurestore description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestores.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestores.erb new file mode 100644 index 000000000..a8766c1af --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore/google_vertex_ai_featurestores.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% featurestore = grab_attributes(pwd)['featurestore'] -%> + describe google_vertex_ai_featurestores(parent: "projects/#{gcp_project_id}/locations/#{featurestore['region']}", region: <%= doc_generation ? 
"' #{featurestore['region']}'":"featurestore['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_feature.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_feature.erb new file mode 100644 index 000000000..ebfb78b2b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_feature.erb @@ -0,0 +1,16 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% featurestore_entity_type_feature = grab_attributes(pwd)['featurestore_entity_type_feature'] -%> +describe google_vertex_ai_featurestore_entity_type_feature(name: "projects/#{gcp_project_id}/locations/#{featurestore_entity_type_feature['region']}/featurestores/#{featurestore_entity_type_feature['featurestore']}/entityTypes/#{featurestore_entity_type_feature['entityType']}/features/#{featurestore_entity_type_feature['feature']}", region: <%= doc_generation ? "' #{featurestore_entity_type_feature['region']}'":"featurestore_entity_type_feature['region']" -%>) do + it { should exist } + its('description') { should cmp <%= doc_generation ? "'#{featurestore_entity_type_feature['description']}'" : "featurestore_entity_type_feature['description']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{featurestore_entity_type_feature['create_time']}'" : "featurestore_entity_type_feature['create_time']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{featurestore_entity_type_feature['etag']}'" : "featurestore_entity_type_feature['etag']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{featurestore_entity_type_feature['name']}'" : "featurestore_entity_type_feature['name']" -%> } + its('update_time') { should cmp <%= doc_generation ? 
"'#{featurestore_entity_type_feature['update_time']}'" : "featurestore_entity_type_feature['update_time']" -%> } + its('value_type') { should cmp <%= doc_generation ? "'#{featurestore_entity_type_feature['value_type']}'" : "featurestore_entity_type_feature['value_type']" -%> } + +end + +describe google_vertex_ai_featurestore_entity_type_feature(name: "does_not_exit", region: <%= doc_generation ? "' #{featurestore_entity_type_feature['region']}'":"featurestore_entity_type_feature['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_feature_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_feature_attributes.erb new file mode 100644 index 000000000..46d23f182 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_feature_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + featurestore_entity_type_feature = input('featurestore_entity_type_feature', value: <%= JSON.pretty_generate(grab_attributes(pwd)['featurestore_entity_type_feature']) -%>, description: 'featurestore_entity_type_feature description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_features.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_features.erb new file mode 100644 index 000000000..9b385f98d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestore_entity_type_feature/google_vertex_ai_featurestore_entity_type_features.erb 
@@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% featurestore_entity_type_feature = grab_attributes(pwd)['featurestore_entity_type_feature'] -%> + describe google_vertex_ai_featurestore_entity_type_features(parent: "projects/#{gcp_project_id}/locations/#{featurestore_entity_type_feature['region']}/featurestores/#{featurestore_entity_type_feature['featurestore']}/entityTypes/#{featurestore_entity_type_feature['entityType']}", region: <%= doc_generation ? "' #{featurestore_entity_type_feature['region']}'":"featurestore_entity_type_feature['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_type.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_type.erb new file mode 100644 index 000000000..4d0b80d94 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_type.erb @@ -0,0 +1,15 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% featurestores_entity_type = grab_attributes(pwd)['featurestores_entity_type'] -%> +describe google_vertex_ai_featurestores_entity_type(name: "projects/#{gcp_project_id}/locations/#{featurestores_entity_type['region']}/featurestores/#{featurestores_entity_type['featurestore']}/entityTypes/#{featurestores_entity_type['name']}", region: <%= doc_generation ? "' #{featurestores_entity_type['region']}'":"featurestores_entity_type['region']" -%>) do + it { should exist } + its('description') { should cmp <%= doc_generation ? "'#{featurestores_entity_type['description']}'" : "featurestores_entity_type['description']" -%> } + its('name') { should cmp <%= doc_generation ? 
"'#{featurestores_entity_type['name']}'" : "featurestores_entity_type['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{featurestores_entity_type['create_time']}'" : "featurestores_entity_type['create_time']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{featurestores_entity_type['etag']}'" : "featurestores_entity_type['etag']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{featurestores_entity_type['update_time']}'" : "featurestores_entity_type['update_time']" -%> } + +end + +describe google_vertex_ai_featurestores_entity_type(name: "does_not_exit", region: <%= doc_generation ? "' #{featurestores_entity_type['region']}'":"featurestores_entity_type['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_type_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_type_attributes.erb new file mode 100644 index 000000000..d85b5d665 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_type_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + featurestores_entity_type = input('featurestores_entity_type', value: <%= JSON.pretty_generate(grab_attributes(pwd)['featurestores_entity_type']) -%>, description: 'featurestores_entity_type description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_types.erb b/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_types.erb new file mode 100644 index 000000000..103f869cd --- /dev/null +++ 
b/mmv1/templates/inspec/examples/google_vertex_ai_featurestores_entity_type/google_vertex_ai_featurestores_entity_types.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% featurestores_entity_type = grab_attributes(pwd)['featurestores_entity_type'] -%> + describe google_vertex_ai_featurestores_entity_types(parent: "projects/#{gcp_project_id}/locations/#{featurestores_entity_type['region']}/featurestores/#{featurestores_entity_type['featurestore']}", region: <%= doc_generation ? "' #{featurestores_entity_type['region']}'":"featurestores_entity_type['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_job.erb b/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_job.erb new file mode 100644 index 000000000..0c5e37003 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_job.erb @@ -0,0 +1,17 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% hyperparameter_tuning_job = grab_attributes(pwd)['hyperparameter_tuning_job'] -%> +describe google_vertex_ai_hyperparameter_tuning_job(name: "projects/#{gcp_project_id}/locations/#{hyperparameter_tuning_job['region']}/hyperparameterTuningJobs/#{hyperparameter_tuning_job['name']}", region: <%= doc_generation ? "' #{hyperparameter_tuning_job['region']}'":"hyperparameter_tuning_job['region']" -%>) do + it { should exist } + its('state') { should cmp <%= doc_generation ? "'#{hyperparameter_tuning_job['state']}'" : "hyperparameter_tuning_job['state']" -%> } + its('end_time') { should cmp <%= doc_generation ? 
"'#{hyperparameter_tuning_job['end_time']}'" : "hyperparameter_tuning_job['end_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{hyperparameter_tuning_job['update_time']}'" : "hyperparameter_tuning_job['update_time']" -%> } + its('start_time') { should cmp <%= doc_generation ? "'#{hyperparameter_tuning_job['start_time']}'" : "hyperparameter_tuning_job['start_time']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{hyperparameter_tuning_job['create_time']}'" : "hyperparameter_tuning_job['create_time']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{hyperparameter_tuning_job['display_name']}'" : "hyperparameter_tuning_job['display_name']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{hyperparameter_tuning_job['name']}'" : "hyperparameter_tuning_job['name']" -%> } + +end + +describe google_vertex_ai_hyperparameter_tuning_job(name: "does_not_exit", region: <%= doc_generation ? "' #{hyperparameter_tuning_job['region']}'":"hyperparameter_tuning_job['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_job_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_job_attributes.erb new file mode 100644 index 000000000..774f54e0a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + hyperparameter_tuning_job = input('hyperparameter_tuning_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['hyperparameter_tuning_job']) -%>, description: 'hyperparameter_tuning_job description') \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_jobs.erb b/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_jobs.erb new file mode 100644 index 000000000..52bc08b6c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_hyperparameter_tuning_job/google_vertex_ai_hyperparameter_tuning_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% hyperparameter_tuning_job = grab_attributes(pwd)['hyperparameter_tuning_job'] -%> + describe google_vertex_ai_hyperparameter_tuning_jobs(parent: "projects/#{gcp_project_id}/locations/#{hyperparameter_tuning_job['region']}", region: <%= doc_generation ? "' #{hyperparameter_tuning_job['region']}'":"hyperparameter_tuning_job['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_index.erb b/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_index.erb new file mode 100644 index 000000000..baf9c0cfd --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_index.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% index = grab_attributes(pwd)['index'] -%> +describe google_vertex_ai_index(name: "projects/#{gcp_project_id}/locations/#{index['region']}/indexes/#{index['name']}", region: <%= doc_generation ? "' #{index['region']}'":"index['region']" -%>) do + it { should exist } + its('description') { should cmp <%= doc_generation ? "'#{index['description']}'" : "index['description']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{index['name']}'" : "index['name']" -%> } + its('display_name') { should cmp <%= doc_generation ? 
"'#{index['display_name']}'" : "index['display_name']" -%> } + its('metadata_schema_uri') { should cmp <%= doc_generation ? "'#{index['metadata_schema_uri']}'" : "index['metadata_schema_uri']" -%> } + its('index_update_method') { should cmp <%= doc_generation ? "'#{index['index_update_method']}'" : "index['index_update_method']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{index['update_time']}'" : "index['update_time']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{index['create_time']}'" : "index['create_time']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{index['etag']}'" : "index['etag']" -%> } + +end + +describe google_vertex_ai_index(name: "does_not_exit", region: <%= doc_generation ? "' #{index['region']}'":"index['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_index_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_index_attributes.erb new file mode 100644 index 000000000..bb573e72d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_index_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + index = input('index', value: <%= JSON.pretty_generate(grab_attributes(pwd)['index']) -%>, description: 'index description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_indices.erb b/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_indices.erb new file mode 100644 index 000000000..7870e5a49 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_index/google_vertex_ai_indices.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% index = 
grab_attributes(pwd)['index'] -%> + describe google_vertex_ai_indexes(parent: "projects/#{gcp_project_id}/locations/#{index['region']}", region: <%= doc_generation ? "' #{index['region']}'":"index['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoint.erb b/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoint.erb new file mode 100644 index 000000000..c583831a1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoint.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% index_endpoint = grab_attributes(pwd)['index_endpoint'] -%> +describe google_vertex_ai_index_endpoint(name: "projects/#{gcp_project_id}/locations/#{index_endpoint['region']}/indexEndpoints/#{index_endpoint['name']}", region: <%= doc_generation ? "' #{index_endpoint['region']}'":"index_endpoint['region']" -%>) do + it { should exist } + its('display_name') { should cmp <%= doc_generation ? "'#{index_endpoint['display_name']}'" : "index_endpoint['display_name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{index_endpoint['create_time']}'" : "index_endpoint['create_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{index_endpoint['name']}'" : "index_endpoint['name']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{index_endpoint['network']}'" : "index_endpoint['network']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{index_endpoint['update_time']}'" : "index_endpoint['update_time']" -%> } + its('public_endpoint_domain_name') { should cmp <%= doc_generation ? "'#{index_endpoint['public_endpoint_domain_name']}'" : "index_endpoint['public_endpoint_domain_name']" -%> } + its('etag') { should cmp <%= doc_generation ? 
"'#{index_endpoint['etag']}'" : "index_endpoint['etag']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{index_endpoint['description']}'" : "index_endpoint['description']" -%> } + +end + +describe google_vertex_ai_index_endpoint(name: "does_not_exit", region: <%= doc_generation ? "' #{index_endpoint['region']}'":"index_endpoint['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoint_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoint_attributes.erb new file mode 100644 index 000000000..1a601c5d2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoint_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + index_endpoint = input('index_endpoint', value: <%= JSON.pretty_generate(grab_attributes(pwd)['index_endpoint']) -%>, description: 'index_endpoint description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoints.erb b/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoints.erb new file mode 100644 index 000000000..c1578a679 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_index_endpoint/google_vertex_ai_index_endpoints.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% index_endpoint = grab_attributes(pwd)['index_endpoint'] -%> + describe google_vertex_ai_index_endpoints(parent: "projects/#{gcp_project_id}/locations/#{index_endpoint['region']}", region: <%= doc_generation ? 
"' #{index_endpoint['region']}'":"index_endpoint['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_store.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_store.erb new file mode 100644 index 000000000..8ca8a4b33 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_store.erb @@ -0,0 +1,14 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% metadata_store = grab_attributes(pwd)['metadata_store'] -%> +describe google_vertex_ai_metadata_store(name: "projects/#{gcp_project_id}/locations/#{metadata_store['region']}/metadataStores/#{metadata_store['name']}", region: <%= doc_generation ? "' #{metadata_store['region']}'":"metadata_store['region']" -%>) do + it { should exist } + its('description') { should cmp <%= doc_generation ? "'#{metadata_store['description']}'" : "metadata_store['description']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{metadata_store['create_time']}'" : "metadata_store['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{metadata_store['update_time']}'" : "metadata_store['update_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{metadata_store['name']}'" : "metadata_store['name']" -%> } + +end + +describe google_vertex_ai_metadata_store(name: "does_not_exit", region: <%= doc_generation ? 
"'#{metadata_store['region']}'":"metadata_store['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_store_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_store_attributes.erb new file mode 100644 index 000000000..d81567db2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_store_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + metadata_store = input('metadata_store', value: <%= JSON.pretty_generate(grab_attributes(pwd)['metadata_store']) -%>, description: 'metadata_store description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_stores.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_stores.erb new file mode 100644 index 000000000..bdcd4159f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_store/google_vertex_ai_metadata_stores.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% metadata_store = grab_attributes(pwd)['metadata_store'] -%> + describe google_vertex_ai_metadata_stores(parent: "projects/#{gcp_project_id}/locations/#{metadata_store['region']}", region: <%= doc_generation ? 
"' #{metadata_store['region']}'":"metadata_store['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifact.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifact.erb new file mode 100644 index 000000000..cc3772f2c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifact.erb @@ -0,0 +1,20 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% metadata_stores_artifact = grab_attributes(pwd)['metadata_stores_artifact'] -%> +describe google_vertex_ai_metadata_stores_artifact(name: "projects/#{gcp_project_id}/locations/#{metadata_stores_artifact['region']}/metadataStores/#{metadata_stores_artifact['metadataStore']}/artifacts/#{metadata_stores_artifact['name']}", region: <%= doc_generation ? "' #{metadata_stores_artifact['region']}'":"metadata_stores_artifact['region']" -%>) do + it { should exist } + its('schema_version') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['schema_version']}'" : "metadata_stores_artifact['schema_version']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['display_name']}'" : "metadata_stores_artifact['display_name']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['etag']}'" : "metadata_stores_artifact['etag']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['name']}'" : "metadata_stores_artifact['name']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['update_time']}'" : "metadata_stores_artifact['update_time']" -%> } + its('state') { should cmp <%= doc_generation ? 
"'#{metadata_stores_artifact['state']}'" : "metadata_stores_artifact['state']" -%> } + its('uri') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['uri']}'" : "metadata_stores_artifact['uri']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['create_time']}'" : "metadata_stores_artifact['create_time']" -%> } + its('schema_title') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['schema_title']}'" : "metadata_stores_artifact['schema_title']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{metadata_stores_artifact['description']}'" : "metadata_stores_artifact['description']" -%> } + +end + +describe google_vertex_ai_metadata_stores_artifact(name: "does_not_exist", region: <%= doc_generation ? "' #{metadata_stores_artifact['region']}'":"metadata_stores_artifact['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifact_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifact_attributes.erb new file mode 100644 index 000000000..784e3dcee --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifact_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + metadata_stores_artifact = input('metadata_stores_artifact', value: <%= JSON.pretty_generate(grab_attributes(pwd)['metadata_stores_artifact']) -%>, description: 'metadata_stores_artifact description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifacts.erb
b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifacts.erb new file mode 100644 index 000000000..c8a18f726 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_artifact/google_vertex_ai_metadata_stores_artifacts.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% metadata_stores_artifact = grab_attributes(pwd)['metadata_stores_artifact'] -%> + describe google_vertex_ai_metadata_stores_artifacts(parent: "projects/#{gcp_project_id}/locations/#{metadata_stores_artifact['region']}/metadataStores/#{metadata_stores_artifact['metadataStore']}", region: <%= doc_generation ? "' #{metadata_stores_artifact['region']}'":"metadata_stores_artifact['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_context.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_context.erb new file mode 100644 index 000000000..23f308c9e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_context.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% metadata_stores_context = grab_attributes(pwd)['metadata_stores_context'] -%> +describe google_vertex_ai_metadata_stores_context(name: "projects/#{gcp_project_id}/locations/#{metadata_stores_context['region']}/metadataStores/#{metadata_stores_context['metadataStore']}/contexts/#{metadata_stores_context['name']}", region: <%= doc_generation ? "' #{metadata_stores_context['region']}'":"metadata_stores_context['region']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? 
"'#{metadata_stores_context['name']}'" : "metadata_stores_context['name']" -%> } + its('schema_title') { should cmp <%= doc_generation ? "'#{metadata_stores_context['schema_title']}'" : "metadata_stores_context['schema_title']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{metadata_stores_context['etag']}'" : "metadata_stores_context['etag']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{metadata_stores_context['description']}'" : "metadata_stores_context['description']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{metadata_stores_context['display_name']}'" : "metadata_stores_context['display_name']" -%> } + its('schema_version') { should cmp <%= doc_generation ? "'#{metadata_stores_context['schema_version']}'" : "metadata_stores_context['schema_version']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{metadata_stores_context['create_time']}'" : "metadata_stores_context['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{metadata_stores_context['update_time']}'" : "metadata_stores_context['update_time']" -%> } + +end + +describe google_vertex_ai_metadata_stores_context(name: "does_not_exist", region: <%= doc_generation ?
"' #{metadata_stores_context['region']}'":"metadata_stores_context['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_context_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_context_attributes.erb new file mode 100644 index 000000000..b1ec41710 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_context_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + metadata_stores_context = input('metadata_stores_context', value: <%= JSON.pretty_generate(grab_attributes(pwd)['metadata_stores_context']) -%>, description: 'metadata_stores_context description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_contexts.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_contexts.erb new file mode 100644 index 000000000..b25c5712b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_context/google_vertex_ai_metadata_stores_contexts.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% metadata_stores_context = grab_attributes(pwd)['metadata_stores_context'] -%> + describe google_vertex_ai_metadata_stores_contexts(parent: "projects/#{gcp_project_id}/locations/#{metadata_stores_context['region']}/metadataStores/#{metadata_stores_context['metadataStore']}", region: <%= doc_generation ? 
"' #{metadata_stores_context['region']}'":"metadata_stores_context['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_execution.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_execution.erb new file mode 100644 index 000000000..a81f95801 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_execution.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% metadata_stores_execution = grab_attributes(pwd)['metadata_stores_execution'] -%> +describe google_vertex_ai_metadata_stores_execution(name: "projects/#{gcp_project_id}/locations/#{metadata_stores_execution['region']}/metadataStores/#{metadata_stores_execution['metadataStore']}/executions/#{metadata_stores_execution['name']}", region: <%= doc_generation ? "' #{metadata_stores_execution['region']}'":"metadata_stores_execution['region']" -%>) do + it { should exist } + its('create_time') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['create_time']}'" : "metadata_stores_execution['create_time']" -%> } + its('schema_version') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['schema_version']}'" : "metadata_stores_execution['schema_version']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['state']}'" : "metadata_stores_execution['state']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['name']}'" : "metadata_stores_execution['name']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['etag']}'" : "metadata_stores_execution['etag']" -%> } + its('display_name') { should cmp <%= doc_generation ? 
"'#{metadata_stores_execution['display_name']}'" : "metadata_stores_execution['display_name']" -%> } + its('schema_title') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['schema_title']}'" : "metadata_stores_execution['schema_title']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['description']}'" : "metadata_stores_execution['description']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{metadata_stores_execution['update_time']}'" : "metadata_stores_execution['update_time']" -%> } + +end + +describe google_vertex_ai_metadata_stores_execution(name: "does_not_exist", region: <%= doc_generation ? "' #{metadata_stores_execution['region']}'":"metadata_stores_execution['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_execution_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_execution_attributes.erb new file mode 100644 index 000000000..fe73a0dd8 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_execution_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + metadata_stores_execution = input('metadata_stores_execution', value: <%= JSON.pretty_generate(grab_attributes(pwd)['metadata_stores_execution']) -%>, description: 'metadata_stores_execution description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_executions.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_executions.erb new file mode 100644 index
000000000..02f27c44a --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_execution/google_vertex_ai_metadata_stores_executions.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% metadata_stores_execution = grab_attributes(pwd)['metadata_stores_execution'] -%> + describe google_vertex_ai_metadata_stores_executions(parent: "projects/#{gcp_project_id}/locations/#{metadata_stores_execution['region']}/metadataStores/#{metadata_stores_execution['metadataStore']}", region: <%= doc_generation ? "' #{metadata_stores_execution['region']}'":"metadata_stores_execution['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schema.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schema.erb new file mode 100644 index 000000000..d4a344330 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schema.erb @@ -0,0 +1,16 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% metadata_stores_metadata_schema = grab_attributes(pwd)['metadata_stores_metadata_schema'] -%> +describe google_vertex_ai_metadata_stores_metadata_schema(name: "projects/#{gcp_project_id}/locations/#{metadata_stores_metadata_schema['region']}/metadataStores/#{metadata_stores_metadata_schema['metadataStore']}/metadataSchemas/#{metadata_stores_metadata_schema['name']}", region: <%= doc_generation ? "' #{metadata_stores_metadata_schema['region']}'":"metadata_stores_metadata_schema['region']" -%>) do + it { should exist } + its('schema_type') { should cmp <%= doc_generation ? 
"'#{metadata_stores_metadata_schema['schema_type']}'" : "metadata_stores_metadata_schema['schema_type']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{metadata_stores_metadata_schema['description']}'" : "metadata_stores_metadata_schema['description']" -%> } + its('schema_version') { should cmp <%= doc_generation ? "'#{metadata_stores_metadata_schema['schema_version']}'" : "metadata_stores_metadata_schema['schema_version']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{metadata_stores_metadata_schema['name']}'" : "metadata_stores_metadata_schema['name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{metadata_stores_metadata_schema['create_time']}'" : "metadata_stores_metadata_schema['create_time']" -%> } + its('schema') { should cmp <%= doc_generation ? "'#{metadata_stores_metadata_schema['schema']}'" : "metadata_stores_metadata_schema['schema']" -%> } + +end + +describe google_vertex_ai_metadata_stores_metadata_schema(name: "does_not_exist", region: <%= doc_generation ?
"' #{metadata_stores_metadata_schema['region']}'":"metadata_stores_metadata_schema['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schema_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schema_attributes.erb new file mode 100644 index 000000000..31447185f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schema_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + metadata_stores_metadata_schema = input('metadata_stores_metadata_schema', value: <%= JSON.pretty_generate(grab_attributes(pwd)['metadata_stores_metadata_schema']) -%>, description: 'metadata_stores_metadata_schema description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schemas.erb b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schemas.erb new file mode 100644 index 000000000..524018322 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_metadata_stores_metadata_schema/google_vertex_ai_metadata_stores_metadata_schemas.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% metadata_stores_metadata_schema = grab_attributes(pwd)['metadata_stores_metadata_schema'] -%> + describe google_vertex_ai_metadata_stores_metadata_schemas(parent: 
"projects/#{gcp_project_id}/locations/#{metadata_stores_metadata_schema['region']}/metadataStores/#{metadata_stores_metadata_schema['metadataStore']}", region: <%= doc_generation ? "' #{metadata_stores_metadata_schema['region']}'":"metadata_stores_metadata_schema['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_model.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_model.erb new file mode 100644 index 000000000..6c8a138ed --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_model.erb @@ -0,0 +1,25 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% model = grab_attributes(pwd)['model'] -%> +describe google_vertex_ai_model(name: "projects/#{gcp_project_id}/locations/#{model['region']}/models/#{model['name']}", region: <%= doc_generation ? "' #{model['region']}'":"model['region']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{model['name']}'" : "model['name']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{model['update_time']}'" : "model['update_time']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{model['etag']}'" : "model['etag']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{model['description']}'" : "model['description']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{model['create_time']}'" : "model['create_time']" -%> } + its('pipeline_job') { should cmp <%= doc_generation ? "'#{model['pipeline_job']}'" : "model['pipeline_job']" -%> } + its('version_update_time') { should cmp <%= doc_generation ? "'#{model['version_update_time']}'" : "model['version_update_time']" -%> } + its('metadata_artifact') { should cmp <%= doc_generation ? 
"'#{model['metadata_artifact']}'" : "model['metadata_artifact']" -%> } + its('metadata_schema_uri') { should cmp <%= doc_generation ? "'#{model['metadata_schema_uri']}'" : "model['metadata_schema_uri']" -%> } + its('version_id') { should cmp <%= doc_generation ? "'#{model['version_id']}'" : "model['version_id']" -%> } + its('artifact_uri') { should cmp <%= doc_generation ? "'#{model['artifact_uri']}'" : "model['artifact_uri']" -%> } + its('training_pipeline') { should cmp <%= doc_generation ? "'#{model['training_pipeline']}'" : "model['training_pipeline']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{model['display_name']}'" : "model['display_name']" -%> } + its('version_create_time') { should cmp <%= doc_generation ? "'#{model['version_create_time']}'" : "model['version_create_time']" -%> } + its('version_description') { should cmp <%= doc_generation ? "'#{model['version_description']}'" : "model['version_description']" -%> } + +end + +describe google_vertex_ai_model(name: "does_not_exist", region: <%= doc_generation ?
"' #{model['region']}'":"model['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_model_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_model_attributes.erb new file mode 100644 index 000000000..5f5eeccd4 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_model_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + model = input('model', value: <%= JSON.pretty_generate(grab_attributes(pwd)['model']) -%>, description: 'model description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_models.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_models.erb new file mode 100644 index 000000000..8a489888e --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model/google_vertex_ai_models.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% model = grab_attributes(pwd)['model'] -%> + describe google_vertex_ai_models(parent: "projects/#{gcp_project_id}/locations/#{model['region']}", region: <%= doc_generation ? 
"' #{model['region']}'":"model['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_job.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_job.erb new file mode 100644 index 000000000..a45559263 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_job.erb @@ -0,0 +1,21 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% model_deployment_monitoring_job = grab_attributes(pwd)['model_deployment_monitoring_job'] -%> +describe google_vertex_ai_model_deployment_monitoring_job(name: "projects/#{gcp_project_id}/locations/#{model_deployment_monitoring_job['region']}/modelDeploymentMonitoringJobs/#{model_deployment_monitoring_job['name']}", region: <%= doc_generation ? "' #{model_deployment_monitoring_job['region']}'":"model_deployment_monitoring_job['region']" -%>) do + it { should exist } + its('state') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['state']}'" : "model_deployment_monitoring_job['state']" -%> } + its('analysis_instance_schema_uri') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['analysis_instance_schema_uri']}'" : "model_deployment_monitoring_job['analysis_instance_schema_uri']" -%> } + its('endpoint') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['endpoint']}'" : "model_deployment_monitoring_job['endpoint']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['display_name']}'" : "model_deployment_monitoring_job['display_name']" -%> } + its('schedule_state') { should cmp <%= doc_generation ? 
"'#{model_deployment_monitoring_job['schedule_state']}'" : "model_deployment_monitoring_job['schedule_state']" -%> } + its('predict_instance_schema_uri') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['predict_instance_schema_uri']}'" : "model_deployment_monitoring_job['predict_instance_schema_uri']" -%> } + its('next_schedule_time') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['next_schedule_time']}'" : "model_deployment_monitoring_job['next_schedule_time']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['create_time']}'" : "model_deployment_monitoring_job['create_time']" -%> } + its('log_ttl') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['log_ttl']}'" : "model_deployment_monitoring_job['log_ttl']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['update_time']}'" : "model_deployment_monitoring_job['update_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{model_deployment_monitoring_job['name']}'" : "model_deployment_monitoring_job['name']" -%> } + +end + +describe google_vertex_ai_model_deployment_monitoring_job(name: "does_not_exist", region: <%= doc_generation ?
"' #{model_deployment_monitoring_job['region']}'":"model_deployment_monitoring_job['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_job_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_job_attributes.erb new file mode 100644 index 000000000..323d9f12b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + model_deployment_monitoring_job = input('model_deployment_monitoring_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['model_deployment_monitoring_job']) -%>, description: 'model_deployment_monitoring_job description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_jobs.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_jobs.erb new file mode 100644 index 000000000..78b31b1ca --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model_deployment_monitoring_job/google_vertex_ai_model_deployment_monitoring_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% model_deployment_monitoring_job = grab_attributes(pwd)['model_deployment_monitoring_job'] -%> + describe google_vertex_ai_model_deployment_monitoring_jobs(parent: "projects/#{gcp_project_id}/locations/#{model_deployment_monitoring_job['region']}", region: <%= doc_generation ? 
"' #{model_deployment_monitoring_job['region']}'":"model_deployment_monitoring_job['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slice.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slice.erb new file mode 100644 index 000000000..007a1a981 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slice.erb @@ -0,0 +1,13 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% models_evaluations_slice = grab_attributes(pwd)['models_evaluations_slice'] -%> +describe google_vertex_ai_models_evaluations_slice(name: "projects/#{gcp_project_id}/locations/#{models_evaluations_slice['region']}/models/#{models_evaluations_slice['model']}/evaluations/#{models_evaluations_slice['evaluation']}/slices/#{models_evaluations_slice['slice']}", region: <%= doc_generation ? "' #{models_evaluations_slice['region']}'":"models_evaluations_slice['region']" -%>) do + it { should exist } + its('create_time') { should cmp <%= doc_generation ? "'#{models_evaluations_slice['create_time']}'" : "models_evaluations_slice['create_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{models_evaluations_slice['name']}'" : "models_evaluations_slice['name']" -%> } + its('metrics_schema_uri') { should cmp <%= doc_generation ? "'#{models_evaluations_slice['metrics_schema_uri']}'" : "models_evaluations_slice['metrics_schema_uri']" -%> } + +end + +describe google_vertex_ai_models_evaluations_slice(name: "does_not_exist", region: <%= doc_generation ?
"' #{models_evaluations_slice['region']}'":"models_evaluations_slice['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slice_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slice_attributes.erb new file mode 100644 index 000000000..1416dcdbf --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slice_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + models_evaluations_slice = input('models_evaluations_slice', value: <%= JSON.pretty_generate(grab_attributes(pwd)['models_evaluations_slice']) -%>, description: 'models_evaluations_slice description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slices.erb b/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slices.erb new file mode 100644 index 000000000..f542767fb --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_model_evaluation_slice/google_vertex_ai_model_evaluation_slices.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% models_evaluations_slice = grab_attributes(pwd)['models_evaluations_slice'] -%> + describe google_vertex_ai_models_evaluations_slices(parent: "projects/#{gcp_project_id}/locations/#{models_evaluations_slice['region']}/models/#{models_evaluations_slice['model']}/evaluations/#{models_evaluations_slice['evaluation']}", region: <%= doc_generation ? 
"' #{models_evaluations_slice['region']}'":"models_evaluations_slice['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluation.erb b/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluation.erb new file mode 100644 index 000000000..0ae7e0fa3 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluation.erb @@ -0,0 +1,16 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% models_evaluation = grab_attributes(pwd)['models_evaluation'] -%> +describe google_vertex_ai_models_evaluation(name: "projects/#{gcp_project_id}/locations/#{models_evaluation['region']}/models/#{models_evaluation['model']}/evaluations/#{models_evaluation['name']}", region: <%= doc_generation ? "' #{models_evaluation['region']}'":"models_evaluation['region']" -%>) do + it { should exist } + its('data_item_schema_uri') { should cmp <%= doc_generation ? "'#{models_evaluation['data_item_schema_uri']}'" : "models_evaluation['data_item_schema_uri']" -%> } + its('metrics_schema_uri') { should cmp <%= doc_generation ? "'#{models_evaluation['metrics_schema_uri']}'" : "models_evaluation['metrics_schema_uri']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{models_evaluation['create_time']}'" : "models_evaluation['create_time']" -%> } + its('annotation_schema_uri') { should cmp <%= doc_generation ? "'#{models_evaluation['annotation_schema_uri']}'" : "models_evaluation['annotation_schema_uri']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{models_evaluation['name']}'" : "models_evaluation['name']" -%> } + its('display_name') { should cmp <%= doc_generation ? 
"'#{models_evaluation['display_name']}'" : "models_evaluation['display_name']" -%> } + +end + +describe google_vertex_ai_models_evaluation(name: "does_not_exist", region: <%= doc_generation ? "' #{models_evaluation['region']}'":"models_evaluation['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluation_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluation_attributes.erb new file mode 100644 index 000000000..fb48c1011 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluation_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + models_evaluation = input('models_evaluation', value: <%= JSON.pretty_generate(grab_attributes(pwd)['models_evaluation']) -%>, description: 'models_evaluation description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluations.erb b/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluations.erb new file mode 100644 index 000000000..e7f7c8a16 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_models_evaluation/google_vertex_ai_models_evaluations.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% models_evaluation = grab_attributes(pwd)['models_evaluation'] -%> + describe google_vertex_ai_models_evaluations(parent: "projects/#{gcp_project_id}/locations/#{models_evaluation['region']}/models/#{models_evaluation['model']}", region: <%= doc_generation ?
"' #{models_evaluation['region']}'":"models_evaluation['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_job.erb b/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_job.erb new file mode 100644 index 000000000..165aa109f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_job.erb @@ -0,0 +1,17 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% nas_job = grab_attributes(pwd)['nas_job'] -%> +describe google_vertex_ai_nas_job(name: "projects/#{gcp_project_id}/locations/#{nas_job['region']}/nasJobs/#{nas_job['name']}", region: <%= doc_generation ? "' #{nas_job['region']}'":"nas_job['region']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{nas_job['name']}'" : "nas_job['name']" -%> } + its('end_time') { should cmp <%= doc_generation ? "'#{nas_job['end_time']}'" : "nas_job['end_time']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{nas_job['state']}'" : "nas_job['state']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{nas_job['create_time']}'" : "nas_job['create_time']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{nas_job['display_name']}'" : "nas_job['display_name']" -%> } + its('start_time') { should cmp <%= doc_generation ? "'#{nas_job['start_time']}'" : "nas_job['start_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{nas_job['update_time']}'" : "nas_job['update_time']" -%> } + +end + +describe google_vertex_ai_nas_job(name: "does_not_exit", region: <%= doc_generation ? 
"' #{nas_job['region']}'":"nas_job['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_job_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_job_attributes.erb new file mode 100644 index 000000000..0b81f6a46 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + nas_job = input('nas_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['nas_job']) -%>, description: 'nas_job description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_jobs.erb b/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_jobs.erb new file mode 100644 index 000000000..59ae2449b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_nas_job/google_vertex_ai_nas_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% nas_job = grab_attributes(pwd)['nas_job'] -%> + describe google_vertex_ai_nas_jobs(parent: "projects/#{gcp_project_id}/locations/#{nas_job['region']}", region: <%= doc_generation ? 
"' #{nas_job['region']}'":"nas_job['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_detail.erb b/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_detail.erb new file mode 100644 index 000000000..af422b963 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_detail.erb @@ -0,0 +1,12 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% nas_jobs_nas_trial_detail = grab_attributes(pwd)['nas_jobs_nas_trial_detail'] -%> +describe google_vertex_ai_nas_jobs_nas_trial_detail(name: "projects/#{gcp_project_id}/locations/#{nas_jobs_nas_trial_detail['region']}/nasJobs/#{nas_jobs_nas_trial_detail['nasJob']}/nasTrialDetails/#{nas_jobs_nas_trial_detail['name']}", region: <%= doc_generation ? "' #{nas_jobs_nas_trial_detail['region']}'":"nas_jobs_nas_trial_detail['region']" -%>) do + it { should exist } + its('parameters') { should cmp <%= doc_generation ? "'#{nas_jobs_nas_trial_detail['parameters']}'" : "nas_jobs_nas_trial_detail['parameters']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{nas_jobs_nas_trial_detail['name']}'" : "nas_jobs_nas_trial_detail['name']" -%> } + +end + +describe google_vertex_ai_nas_jobs_nas_trial_detail(name: "does_not_exit", region: <%= doc_generation ? 
"' #{nas_jobs_nas_trial_detail['region']}'":"nas_jobs_nas_trial_detail['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_detail_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_detail_attributes.erb new file mode 100644 index 000000000..186134763 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_detail_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + nas_jobs_nas_trial_detail = input('nas_jobs_nas_trial_detail', value: <%= JSON.pretty_generate(grab_attributes(pwd)['nas_jobs_nas_trial_detail']) -%>, description: 'nas_jobs_nas_trial_detail description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_details.erb b/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_details.erb new file mode 100644 index 000000000..4b3d4abbb --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_nas_jobs_nas_trial_detail/google_vertex_ai_nas_jobs_nas_trial_details.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% nas_jobs_nas_trial_detail = grab_attributes(pwd)['nas_jobs_nas_trial_detail'] -%> + describe google_vertex_ai_nas_jobs_nas_trial_details(parent: "projects/#{gcp_project_id}/locations/#{nas_jobs_nas_trial_detail['region']}/nasJobs/#{nas_jobs_nas_trial_detail['nasJob']}", region: <%= doc_generation ? 
"' #{nas_jobs_nas_trial_detail['region']}'":"nas_jobs_nas_trial_detail['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_job.erb b/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_job.erb new file mode 100644 index 000000000..08c243aac --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_job.erb @@ -0,0 +1,21 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% pipeline_job = grab_attributes(pwd)['pipeline_job'] -%> +describe google_vertex_ai_pipeline_job(name: "projects/#{gcp_project_id}/locations/#{pipeline_job['region']}/pipelineJobs/#{pipeline_job['name']}", region: <%= doc_generation ? "' #{pipeline_job['region']}'":"pipeline_job['region']" -%>) do + it { should exist } + its('end_time') { should cmp <%= doc_generation ? "'#{pipeline_job['end_time']}'" : "pipeline_job['end_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{pipeline_job['update_time']}'" : "pipeline_job['update_time']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{pipeline_job['state']}'" : "pipeline_job['state']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{pipeline_job['create_time']}'" : "pipeline_job['create_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{pipeline_job['name']}'" : "pipeline_job['name']" -%> } + its('schedule_name') { should cmp <%= doc_generation ? "'#{pipeline_job['schedule_name']}'" : "pipeline_job['schedule_name']" -%> } + its('start_time') { should cmp <%= doc_generation ? "'#{pipeline_job['start_time']}'" : "pipeline_job['start_time']" -%> } + its('service_account') { should cmp <%= doc_generation ? "'#{pipeline_job['service_account']}'" : "pipeline_job['service_account']" -%> } + its('display_name') { should cmp <%= doc_generation ? 
"'#{pipeline_job['display_name']}'" : "pipeline_job['display_name']" -%> } + its('template_uri') { should cmp <%= doc_generation ? "'#{pipeline_job['template_uri']}'" : "pipeline_job['template_uri']" -%> } + its('network') { should cmp <%= doc_generation ? "'#{pipeline_job['network']}'" : "pipeline_job['network']" -%> } + +end + +describe google_vertex_ai_pipeline_job(name: "does_not_exit", region: <%= doc_generation ? "' #{pipeline_job['region']}'":"pipeline_job['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_job_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_job_attributes.erb new file mode 100644 index 000000000..e61c16fb1 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_job_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + pipeline_job = input('pipeline_job', value: <%= JSON.pretty_generate(grab_attributes(pwd)['pipeline_job']) -%>, description: 'pipeline_job description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_jobs.erb b/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_jobs.erb new file mode 100644 index 000000000..d876ca4de --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_pipeline_job/google_vertex_ai_pipeline_jobs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% pipeline_job = grab_attributes(pwd)['pipeline_job'] -%> + describe google_vertex_ai_pipeline_jobs(parent: "projects/#{gcp_project_id}/locations/#{pipeline_job['region']}", region: <%= doc_generation ? 
"' #{pipeline_job['region']}'":"pipeline_job['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedule.erb b/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedule.erb new file mode 100644 index 000000000..5e4531504 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedule.erb @@ -0,0 +1,24 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% schedule = grab_attributes(pwd)['schedule'] -%> +describe google_vertex_ai_schedule(name: "projects/#{gcp_project_id}/locations/#{schedule['region']}/schedules/#{schedule['name']}", region: <%= doc_generation ? "' #{schedule['region']}'":"schedule['region']" -%>) do + it { should exist } + its('started_run_count') { should cmp <%= doc_generation ? "'#{schedule['started_run_count']}'" : "schedule['started_run_count']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{schedule['name']}'" : "schedule['name']" -%> } + its('cron') { should cmp <%= doc_generation ? "'#{schedule['cron']}'" : "schedule['cron']" -%> } + its('last_pause_time') { should cmp <%= doc_generation ? "'#{schedule['last_pause_time']}'" : "schedule['last_pause_time']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{schedule['create_time']}'" : "schedule['create_time']" -%> } + its('start_time') { should cmp <%= doc_generation ? "'#{schedule['start_time']}'" : "schedule['start_time']" -%> } + its('max_run_count') { should cmp <%= doc_generation ? "'#{schedule['max_run_count']}'" : "schedule['max_run_count']" -%> } + its('next_run_time') { should cmp <%= doc_generation ? "'#{schedule['next_run_time']}'" : "schedule['next_run_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{schedule['update_time']}'" : "schedule['update_time']" -%> } + its('last_resume_time') { should cmp <%= doc_generation ? 
"'#{schedule['last_resume_time']}'" : "schedule['last_resume_time']" -%> } + its('max_concurrent_run_count') { should cmp <%= doc_generation ? "'#{schedule['max_concurrent_run_count']}'" : "schedule['max_concurrent_run_count']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{schedule['state']}'" : "schedule['state']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{schedule['display_name']}'" : "schedule['display_name']" -%> } + its('end_time') { should cmp <%= doc_generation ? "'#{schedule['end_time']}'" : "schedule['end_time']" -%> } + +end + +describe google_vertex_ai_schedule(name: "does_not_exit", region: <%= doc_generation ? "' #{schedule['region']}'":"schedule['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedule_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedule_attributes.erb new file mode 100644 index 000000000..7ee7b92e4 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedule_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + schedule = input('schedule', value: <%= JSON.pretty_generate(grab_attributes(pwd)['schedule']) -%>, description: 'schedule description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedules.erb b/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedules.erb new file mode 100644 index 000000000..ab03d0199 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_schedule/google_vertex_ai_schedules.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% schedule = grab_attributes(pwd)['schedule'] -%> + 
describe google_vertex_ai_schedules(parent: "projects/#{gcp_project_id}/locations/#{schedule['region']}", region: <%= doc_generation ? "' #{schedule['region']}'":"schedule['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trial.erb b/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trial.erb new file mode 100644 index 000000000..dd4698587 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trial.erb @@ -0,0 +1,18 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% studies_trial = grab_attributes(pwd)['studies_trial'] -%> +describe google_vertex_ai_studies_trial(name: "projects/#{gcp_project_id}/locations/#{studies_trial['region']}/studies/#{studies_trial['study']}/trials/#{studies_trial['name']}", region: <%= doc_generation ? "' #{studies_trial['region']}'":"studies_trial['region']" -%>) do + it { should exist } + its('start_time') { should cmp <%= doc_generation ? "'#{studies_trial['start_time']}'" : "studies_trial['start_time']" -%> } + its('end_time') { should cmp <%= doc_generation ? "'#{studies_trial['end_time']}'" : "studies_trial['end_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{studies_trial['name']}'" : "studies_trial['name']" -%> } + its('infeasible_reason') { should cmp <%= doc_generation ? "'#{studies_trial['infeasible_reason']}'" : "studies_trial['infeasible_reason']" -%> } + its('client_id') { should cmp <%= doc_generation ? "'#{studies_trial['client_id']}'" : "studies_trial['client_id']" -%> } + its('custom_job') { should cmp <%= doc_generation ? "'#{studies_trial['custom_job']}'" : "studies_trial['custom_job']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{studies_trial['state']}'" : "studies_trial['state']" -%> } + its('id') { should cmp <%= doc_generation ? 
"'#{studies_trial['id']}'" : "studies_trial['id']" -%> } + +end + +describe google_vertex_ai_studies_trial(name: "does_not_exit", region: <%= doc_generation ? "' #{studies_trial['region']}'":"studies_trial['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trial_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trial_attributes.erb new file mode 100644 index 000000000..299b5566b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trial_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + studies_trial = input('studies_trial', value: <%= JSON.pretty_generate(grab_attributes(pwd)['studies_trial']) -%>, description: 'studies_trial description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trials.erb b/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trials.erb new file mode 100644 index 000000000..88c13fcb6 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_studies_trial/google_vertex_ai_studies_trials.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% studies_trial = grab_attributes(pwd)['studies_trial'] -%> + describe google_vertex_ai_studies_trials(parent: "projects/#{gcp_project_id}/locations/#{studies_trial['region']}/studies/#{studies_trial['study']}", region: <%= doc_generation ? 
"' #{studies_trial['region']}'":"studies_trial['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_studies.erb b/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_studies.erb new file mode 100644 index 000000000..71e76df7c --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_studies.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% study = grab_attributes(pwd)['study'] -%> + describe google_vertex_ai_studies(parent: "projects/#{gcp_project_id}/locations/#{study['region']}", region: <%= doc_generation ? "' #{study['region']}'":"study['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_study.erb b/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_study.erb new file mode 100644 index 000000000..5c2125084 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_study.erb @@ -0,0 +1,15 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% study = grab_attributes(pwd)['study'] -%> +describe google_vertex_ai_study(name: "projects/#{gcp_project_id}/locations/#{study['region']}/studies/#{study['name']}", region: <%= doc_generation ? "' #{study['region']}'":"study['region']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? "'#{study['name']}'" : "study['name']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{study['display_name']}'" : "study['display_name']" -%> } + its('state') { should cmp <%= doc_generation ? "'#{study['state']}'" : "study['state']" -%> } + its('create_time') { should cmp <%= doc_generation ? 
"'#{study['create_time']}'" : "study['create_time']" -%> } + its('inactive_reason') { should cmp <%= doc_generation ? "'#{study['inactive_reason']}'" : "study['inactive_reason']" -%> } + +end + +describe google_vertex_ai_study(name: "does_not_exit", region: <%= doc_generation ? "' #{study['region']}'":"study['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_study_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_study_attributes.erb new file mode 100644 index 000000000..8e0d2e6e7 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_study/google_vertex_ai_study_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + study = input('study', value: <%= JSON.pretty_generate(grab_attributes(pwd)['study']) -%>, description: 'study description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboard.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboard.erb new file mode 100644 index 000000000..e8b30dc66 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboard.erb @@ -0,0 +1,17 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% tensorboard = grab_attributes(pwd)['tensorboard'] -%> +describe google_vertex_ai_tensorboard(name: "projects/#{gcp_project_id}/locations/#{tensorboard['region']}/tensorboards/#{tensorboard['name']}", region: <%= doc_generation ? "' #{tensorboard['region']}'":"tensorboard['region']" -%>) do + it { should exist } + its('name') { should cmp <%= doc_generation ? 
"'#{tensorboard['name']}'" : "tensorboard['name']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{tensorboard['update_time']}'" : "tensorboard['update_time']" -%> } + its('blob_storage_path_prefix') { should cmp <%= doc_generation ? "'#{tensorboard['blob_storage_path_prefix']}'" : "tensorboard['blob_storage_path_prefix']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{tensorboard['etag']}'" : "tensorboard['etag']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{tensorboard['create_time']}'" : "tensorboard['create_time']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{tensorboard['display_name']}'" : "tensorboard['display_name']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{tensorboard['description']}'" : "tensorboard['description']" -%> } + +end + +describe google_vertex_ai_tensorboard(name: "does_not_exit", region: <%= doc_generation ? "' #{tensorboard['region']}'":"tensorboard['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboard_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboard_attributes.erb new file mode 100644 index 000000000..d5a2f4d15 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboard_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + tensorboard = input('tensorboard', value: <%= JSON.pretty_generate(grab_attributes(pwd)['tensorboard']) -%>, description: 'tensorboard description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboards.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboards.erb new 
file mode 100644 index 000000000..2e221d0b7 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard/google_vertex_ai_tensorboards.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% tensorboard = grab_attributes(pwd)['tensorboard'] -%> + describe google_vertex_ai_tensorboards(parent: "projects/#{gcp_project_id}/locations/#{tensorboard['region']}", region: <%= doc_generation ? "' #{tensorboard['region']}'":"tensorboard['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_run.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_run.erb new file mode 100644 index 000000000..3c0a384cc --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_run.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% tensorboard_experiment_run = grab_attributes(pwd)['tensorboard_experiment_run'] -%> +describe google_vertex_ai_tensorboard_experiment_run(name: "projects/#{gcp_project_id}/locations/#{tensorboard_experiment_run['region']}/tensorboards/#{tensorboard_experiment_run['tensorboard']}/experiments/#{tensorboard_experiment_run['experiment']}/runs/#{tensorboard_experiment_run['run']}", region: <%= doc_generation ? "' #{tensorboard_experiment_run['region']}'":"tensorboard_experiment_run['region']" -%>) do + it { should exist } + +end + +describe google_vertex_ai_tensorboard_experiment_run(name: "does_not_exit", region: <%= doc_generation ? 
"' #{tensorboard_experiment_run['region']}'":"tensorboard_experiment_run['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_run_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_run_attributes.erb new file mode 100644 index 000000000..7bf0be09f --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_run_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + tensorboard_experiment_run = input('tensorboard_experiment_run', value: <%= JSON.pretty_generate(grab_attributes(pwd)['tensorboard_experiment_run']) -%>, description: 'tensorboard_experiment_run description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_runs.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_runs.erb new file mode 100644 index 000000000..19b887d0b --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run/google_vertex_ai_tensorboard_experiment_runs.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% tensorboard_experiment_run = grab_attributes(pwd)['tensorboard_experiment_run'] -%> + describe google_vertex_ai_tensorboard_experiment_runs(parent: "projects/#{gcp_project_id}/locations/#{tensorboard_experiment_run['region']}/tensorboards/#{tensorboard_experiment_run['tensorboard']}/experiments/#{tensorboard_experiment_run['experiment']}", region: <%= doc_generation ? 
"' #{tensorboard_experiment_run['region']}'":"tensorboard_experiment_run['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resource.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resource.erb new file mode 100644 index 000000000..cbea6cdf4 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resource.erb @@ -0,0 +1,19 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% tensorboard_experiment_run_time_series_resource = grab_attributes(pwd)['tensorboard_experiment_run_time_series_resource'] -%> +describe google_vertex_ai_tensorboard_experiment_run_time_series_resource(name: "projects/#{gcp_project_id}/locations/#{tensorboard_experiment_run_time_series_resource['region']}/tensorboards/#{tensorboard_experiment_run_time_series_resource['tensorboard']}/experiments/#{tensorboard_experiment_run_time_series_resource['experiment']}/runs/#{tensorboard_experiment_run_time_series_resource['run']}/timeSeries/#{tensorboard_experiment_run_time_series_resource['timeSery']}", region: <%= doc_generation ? "' #{tensorboard_experiment_run_time_series_resource['region']}'":"tensorboard_experiment_run_time_series_resource['region']" -%>) do + it { should exist } + its('plugin_name') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['plugin_name']}'" : "tensorboard_experiment_run_time_series_resource['plugin_name']" -%> } + its('plugin_data') { should cmp <%= doc_generation ? 
"'#{tensorboard_experiment_run_time_series_resource['plugin_data']}'" : "tensorboard_experiment_run_time_series_resource['plugin_data']" -%> } + its('description') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['description']}'" : "tensorboard_experiment_run_time_series_resource['description']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['etag']}'" : "tensorboard_experiment_run_time_series_resource['etag']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['display_name']}'" : "tensorboard_experiment_run_time_series_resource['display_name']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['update_time']}'" : "tensorboard_experiment_run_time_series_resource['update_time']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['create_time']}'" : "tensorboard_experiment_run_time_series_resource['create_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['name']}'" : "tensorboard_experiment_run_time_series_resource['name']" -%> } + its('value_type') { should cmp <%= doc_generation ? "'#{tensorboard_experiment_run_time_series_resource['value_type']}'" : "tensorboard_experiment_run_time_series_resource['value_type']" -%> } + +end + +describe google_vertex_ai_tensorboard_experiment_run_time_series_resource(name: "does_not_exit", region: <%= doc_generation ? 
"' #{tensorboard_experiment_run_time_series_resource['region']}'":"tensorboard_experiment_run_time_series_resource['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resource_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resource_attributes.erb new file mode 100644 index 000000000..4671909bf --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resource_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + tensorboard_experiment_run_time_series_resource = input('tensorboard_experiment_run_time_series_resource', value: <%= JSON.pretty_generate(grab_attributes(pwd)['tensorboard_experiment_run_time_series_resource']) -%>, description: 'tensorboard_experiment_run_time_series_resource description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resources.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resources.erb new file mode 100644 index 000000000..267fd19b2 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboard_experiment_run_time_series_resource/google_vertex_ai_tensorboard_experiment_run_time_series_resources.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% 
tensorboard_experiment_run_time_series_resource = grab_attributes(pwd)['tensorboard_experiment_run_time_series_resource'] -%> + describe google_vertex_ai_tensorboard_experiment_run_time_series_resources(parent: "projects/#{gcp_project_id}/locations/#{tensorboard_experiment_run_time_series_resource['region']}/tensorboards/#{tensorboard_experiment_run_time_series_resource['tensorboard']}/experiments/#{tensorboard_experiment_run_time_series_resource['experiment']}/runs/#{tensorboard_experiment_run_time_series_resource['run']}", region: <%= doc_generation ? "' #{tensorboard_experiment_run_time_series_resource['region']}'":"tensorboard_experiment_run_time_series_resource['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiment.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiment.erb new file mode 100644 index 000000000..f84ab5277 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiment.erb @@ -0,0 +1,17 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> +<% tensorboards_experiment = grab_attributes(pwd)['tensorboards_experiment'] -%> +describe google_vertex_ai_tensorboards_experiment(name: "projects/#{gcp_project_id}/locations/#{tensorboards_experiment['region']}/tensorboards/#{tensorboards_experiment['tensorboard']}/experiments/#{tensorboards_experiment['name']}", region: <%= doc_generation ? "' #{tensorboards_experiment['region']}'":"tensorboards_experiment['region']" -%>) do + it { should exist } + its('description') { should cmp <%= doc_generation ? "'#{tensorboards_experiment['description']}'" : "tensorboards_experiment['description']" -%> } + its('source') { should cmp <%= doc_generation ? 
"'#{tensorboards_experiment['source']}'" : "tensorboards_experiment['source']" -%> } + its('display_name') { should cmp <%= doc_generation ? "'#{tensorboards_experiment['display_name']}'" : "tensorboards_experiment['display_name']" -%> } + its('create_time') { should cmp <%= doc_generation ? "'#{tensorboards_experiment['create_time']}'" : "tensorboards_experiment['create_time']" -%> } + its('update_time') { should cmp <%= doc_generation ? "'#{tensorboards_experiment['update_time']}'" : "tensorboards_experiment['update_time']" -%> } + its('name') { should cmp <%= doc_generation ? "'#{tensorboards_experiment['name']}'" : "tensorboards_experiment['name']" -%> } + its('etag') { should cmp <%= doc_generation ? "'#{tensorboards_experiment['etag']}'" : "tensorboards_experiment['etag']" -%> } + +end + +describe google_vertex_ai_tensorboards_experiment(name: "does_not_exit", region: <%= doc_generation ? "' #{tensorboards_experiment['region']}'":"tensorboards_experiment['region']" -%>) do + it { should_not exist } +end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiment_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiment_attributes.erb new file mode 100644 index 000000000..1e937ab61 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiment_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + tensorboards_experiment = input('tensorboards_experiment', value: <%= JSON.pretty_generate(grab_attributes(pwd)['tensorboards_experiment']) -%>, description: 'tensorboards_experiment description') \ No newline at end of file diff --git 
a/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiments.erb b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiments.erb new file mode 100644 index 000000000..36bbb0919 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_tensorboards_experiment/google_vertex_ai_tensorboards_experiments.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% tensorboards_experiment = grab_attributes(pwd)['tensorboards_experiment'] -%> + describe google_vertex_ai_tensorboards_experiments(parent: "projects/#{gcp_project_id}/locations/#{tensorboards_experiment['region']}/tensorboards/#{tensorboards_experiment['tensorboard']}", region: <%= doc_generation ? "' #{tensorboards_experiment['region']}'":"tensorboards_experiment['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipeline.erb b/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipeline.erb new file mode 100644 index 000000000..5147aec89 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipeline.erb @@ -0,0 +1,10 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% training_pipeline = grab_attributes(pwd)['training_pipeline'] -%> + + + describe google_vertex_ai_training_pipeline(name: "projects/#{gcp_project_id}/locations/#{training_pipeline['region']}/trainingPipelines/#{training_pipeline['job_id']}", region: <%= doc_generation ? "' #{training_pipeline['region']}'":"training_pipeline['region']" -%>) do + it { should exist } + end + describe google_vertex_ai_training_pipeline(name: "does_not_exit", region: <%= doc_generation ? 
"' #{training_pipeline['region']}'":"training_pipeline['region']" -%>) do + it { should_not exist } + end diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipeline_attributes.erb b/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipeline_attributes.erb new file mode 100644 index 000000000..a95b8e9c9 --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipeline_attributes.erb @@ -0,0 +1,3 @@ +gcp_project_id = input(:gcp_project_id, value: '<%= external_attribute(pwd, 'gcp_project_id') -%>', description: 'The GCP project identifier.') + + training_pipeline = input('training_pipeline', value: <%= JSON.pretty_generate(grab_attributes(pwd)['training_pipeline']) -%>, description: 'training_pipeline description') \ No newline at end of file diff --git a/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipelines.erb b/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipelines.erb new file mode 100644 index 000000000..5b358531d --- /dev/null +++ b/mmv1/templates/inspec/examples/google_vertex_ai_training_pipeline/google_vertex_ai_training_pipelines.erb @@ -0,0 +1,5 @@ +<% gcp_project_id = "#{external_attribute(pwd, 'gcp_project_id', doc_generation)}" -%> + <% training_pipeline = grab_attributes(pwd)['training_pipeline'] -%> + describe google_vertex_ai_training_pipelines(parent: "projects/#{gcp_project_id}/locations/#{training_pipeline['region']}", region: <%= doc_generation ? 
"' #{training_pipeline['region']}'":"training_pipeline['region']" -%>) do + it { should exist } + end \ No newline at end of file diff --git a/mmv1/templates/inspec/plural_resource.erb b/mmv1/templates/inspec/plural_resource.erb index a067301ea..705db2bb1 100644 --- a/mmv1/templates/inspec/plural_resource.erb +++ b/mmv1/templates/inspec/plural_resource.erb @@ -39,7 +39,7 @@ name = resource_name(object, product) filter_table_config.connect(self, :table) -<% +<% link_query_items = object&.nested_query&.keys&.first || object.collection_url_key -%> def initialize(params = {}) @@ -82,7 +82,7 @@ link_query_items = object&.nested_query&.keys&.first || object.collection_url_ke def transformers { <% object.all_user_properties.reject(&:exclude_plural).each do |prop| -%> - '<%= prop.api_name -%>' => ->(obj) { <%= "return :#{ prop.override_name || prop.out_name }, #{parse_code(prop, 'obj')}" -%> }, + '<%= prop.api_name -%>' => ->(obj) { <%= "[:#{ prop.override_name || prop.out_name }, #{parse_code(prop, 'obj')}]" -%> }, <% end # object.all_user_properties.each -%> } end diff --git a/mmv1/templates/inspec/tests/integration/build/gcp-mm.tf b/mmv1/templates/inspec/tests/integration/build/gcp-mm.tf index ec39079ff..a9bce3de4 100644 --- a/mmv1/templates/inspec/tests/integration/build/gcp-mm.tf +++ b/mmv1/templates/inspec/tests/integration/build/gcp-mm.tf @@ -1333,4 +1333,351 @@ variable "sql_connect" { resource "google_sql_ssl_cert" "client_cert" { common_name = var.sql_connect["common_name"] instance = var.gcp_db_instance_name -} \ No newline at end of file +} + +resource "google_data_loss_prevention_stored_info_type" "basic" { + parent = "projects/my-project-name" + description = "Description" + display_name = "Displayname" + + regex { + pattern = "patient" + group_indexes = [2] + } +} + +resource "google_compute_region_security_policy" "default" { + provider = google-beta + + region = "us-west2" + name = "policyruletest-${local.name_suffix}" + description = "basic region 
security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + provider = google-beta + + region = "us-west2" + security_policy = google_compute_region_security_policy.default.name + description = "new rule" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + action = "allow" + preview = true +} + + +resource "google_compute_region_security_policy" "default" { + provider = google-beta + + region = "us-west2" + name = "policyruletest-${local.name_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + provider = google-beta + + region = "us-west2" + security_policy = google_compute_region_security_policy.default.name + description = "new rule" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + action = "allow" + preview = true +} + + +resource "google_compute_interconnect_attachment" "on_prem" { + name = "on-prem-attachment-${local.name_suffix}" + edge_availability_domain = "AVAILABILITY_DOMAIN_1" + type = "PARTNER" + router = google_compute_router.foobar.id + mtu = 1500 +} + +resource "google_compute_router" "foobar" { + name = "router-1-${local.name_suffix}" + network = google_compute_network.foobar.name + bgp { + asn = 16550 + } +} + +resource "google_compute_network" "foobar" { + name = "network-1-${local.name_suffix}" + auto_create_subnetworks = false +} + + + + + +resource "google_vertex_ai_tensorboard" "tensorboard" { + display_name = "terraform-${local.name_suffix}" + description = "sample description" + labels = { + "key1" : "value1", + "key2" : "value2" + } + region = "us-central1" +} + + +resource "google_ml_engine_model" "default" { + name = "default-${local.name_suffix}" + description = "My model" + regions = ["us-central1"] +} + + +resource "google_vertex_ai_featurestore" 
"featurestore" { + name = "terraform-${local.name_suffix}" + labels = { + foo = "bar" + } + region = "us-central1" + online_serving_config { + fixed_node_count = 2 + } +} + +resource "google_vertex_ai_featurestore_entitytype" "entity" { + name = "terraform-${local.name_suffix}" + labels = { + foo = "bar" + } + featurestore = google_vertex_ai_featurestore.featurestore.id +} + +resource "google_vertex_ai_featurestore_entitytype_feature" "feature" { + name = "terraform-${local.name_suffix}" + labels = { + foo = "bar" + } + entitytype = google_vertex_ai_featurestore_entitytype.entity.id + + value_type = "INT64_ARRAY" +} + + +resource "google_vertex_ai_index_endpoint" "index_endpoint" { + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + region = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.vertex_network.name}" + depends_on = [ + google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = data.google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "address-name-${local.name_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = data.google_compute_network.vertex_network.id +} + +data "google_compute_network" "vertex_network" { + name = "network-name-${local.name_suffix}" +} + +data "google_project" "project" {} + + +resource "google_service_directory_namespace" "example" { + provider = google-beta + namespace_id = "example-namespace-${local.name_suffix}" + location = "us-central1" +} + +resource "google_service_directory_service" "example" { + provider = google-beta + service_id = 
"example-service-${local.name_suffix}" + namespace = google_service_directory_namespace.example.id +} + +resource "google_service_directory_endpoint" "example" { + provider = google-beta + endpoint_id = "example-endpoint-${local.name_suffix}" + service = google_service_directory_service.example.id + + metadata = { + stage = "prod" + region = "us-central1" + } + + address = "1.2.3.4" + port = 5353 +} + + +resource "google_service_directory_namespace" "example" { + provider = google-beta + namespace_id = "example-namespace-${local.name_suffix}" + location = "us-central1" +} + +resource "google_service_directory_service" "example" { + provider = google-beta + service_id = "example-service-${local.name_suffix}" + namespace = google_service_directory_namespace.example.id +} + +resource "google_service_directory_endpoint" "example" { + provider = google-beta + endpoint_id = "example-endpoint-${local.name_suffix}" + service = google_service_directory_service.example.id + + metadata = { + stage = "prod" + region = "us-central1" + } + + address = "1.2.3.4" + port = 5353 +} + + +resource "google_service_directory_namespace" "example" { + provider = google-beta + namespace_id = "example-namespace-${local.name_suffix}" + location = "us-central1" +} + +resource "google_service_directory_service" "example" { + provider = google-beta + service_id = "example-service-${local.name_suffix}" + namespace = google_service_directory_namespace.example.id +} + +resource "google_service_directory_endpoint" "example" { + provider = google-beta + endpoint_id = "example-endpoint-${local.name_suffix}" + service = google_service_directory_service.example.id + + metadata = { + stage = "prod" + region = "us-central1" + } + + address = "1.2.3.4" + port = 5353 +} + + +resource "google_service_directory_namespace" "example" { + provider = google-beta + namespace_id = "example-namespace-${local.name_suffix}" + location = "us-central1" +} + +resource "google_service_directory_service" "example" { + 
provider = google-beta + service_id = "example-service-${local.name_suffix}" + namespace = google_service_directory_namespace.example.id +} + +resource "google_service_directory_endpoint" "example" { + provider = google-beta + endpoint_id = "example-endpoint-${local.name_suffix}" + service = google_service_directory_service.example.id + + metadata = { + stage = "prod" + region = "us-central1" + } + + address = "1.2.3.4" + port = 5353 +} + + +resource "google_datastore_index" "default" { + kind = "foo" + properties { + name = "property_a-${local.name_suffix}" + direction = "ASCENDING" + } + properties { + name = "property_b-${local.name_suffix}" + direction = "ASCENDING" + } +} + + +resource "google_compute_region_security_policy" "default" { + provider = google-beta + + region = "us-west2" + name = "policyruletest-${local.name_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + provider = google-beta + + region = "us-west2" + security_policy = google_compute_region_security_policy.default.name + description = "new rule" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + action = "allow" + preview = true +} + + +resource "google_compute_region_security_policy" "default" { + provider = google-beta + + region = "us-west2" + name = "policyruletest-${local.name_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + provider = google-beta + + region = "us-west2" + security_policy = google_compute_region_security_policy.default.name + description = "new rule" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + action = "allow" + preview = true +} + diff --git a/mmv1/templates/inspec/tests/integration/configuration/mm-attributes.yml 
b/mmv1/templates/inspec/tests/integration/configuration/mm-attributes.yml index 1fea58a7d..8e1d76afe 100644 --- a/mmv1/templates/inspec/tests/integration/configuration/mm-attributes.yml +++ b/mmv1/templates/inspec/tests/integration/configuration/mm-attributes.yml @@ -5,7 +5,7 @@ ssl_policy: custom_feature: 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384' custom_feature2: 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384' -topic: +topic: name: 'inspec-gcp-topic' subscription: @@ -232,7 +232,7 @@ standardappversion: runtime: nodejs10 entrypoint: "node ./app.js" port: "8080" - + ml_model: name: ml_model region: us-central1 @@ -306,6 +306,11 @@ network_endpoint_group: name: inspec-gcp-endpoint-group default_port: 90 +global_network_endpoint_group: + name: inspec-gcp-global-endpoint-group + network_endpoint_type: INTERNET_IP_PORT + default_port: 90 + node_template: name: inspec-node-template label_key: key @@ -338,7 +343,7 @@ spannerinstance: spannerdatabase: name: spdatabase instance: spinstance - ddl: "CREATE TABLE test (test STRING(MAX),) PRIMARY KEY (test)" + ddl: "CREATE TABLE test (test STRING(MAX),) PRIMARY KEY (test)" scheduler_job: # region must match where the appengine instance is deployed @@ -437,7 +442,7 @@ logging_metric: compute_image: name: inspec-image source: https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz - + security_policy: name: sec-policy action: deny(403) @@ -492,7 +497,7 @@ sql_database_flag: applies_to: MYSQL_5_6 allowed_string_values: ON requires_restart: true - + sql_connect: region: us-central1 database_version: POSTGRES_13 @@ -514,4 +519,887 @@ region_health_check: region: us-central1 timeout_sec: 10 check_interval_sec: 10 - tcp_health_check_port: 80 \ No newline at end of file + tcp_health_check_port: 80 + +dlp: + name: "i-inspec-gcp-dlp" + location: "us-east-2" + type: "INSPECT_JOB" + state: "ACTIVE" + inspectDetails: + requestedOptions: + snapshotInspectTemplate: "" + 
jobConfig: + storageConfig: + hybridOptions: + description: "test" + tableOptions: "" + description: "Description" + display_name: "Displayname" + job_attribute_name: "job_attribute-1" + job_trigger_status: "HEALTHY" + job_trigger_name: "name1" + job_trigger_display_name: "dp" + job_trigger_description: "description" + deidentify_templates: + name: "dlp-template-inspec" + location: "europe-west2" + type: "Infotype" + +featurestore: + name : "value_name" + region : "value_region" + parent : "value_parent" + state : "value_state" + create_time : "value_createtime" + etag : "value_etag" + update_time : "value_updatetime" + +training_pipeline: + name : "value_name" + job_id: "job_id" + region : "value_region" + parent : "value_parent" + + +nas_job: + name : "value_name" + region : "value_region" + parent : "value_parent" + end_time : "value_endtime" + state : "value_state" + create_time : "value_createtime" + display_name : "value_displayname" + start_time : "value_starttime" + update_time : "value_updatetime" + +batch_prediction_job: + name : "value_name" + region : "value_region" + parent : "value_parent" + create_time : "value_createtime" + model_version_id : "value_modelversionid" + end_time : "value_endtime" + start_time : "value_starttime" + update_time : "value_updatetime" + state : "value_state" + model : "value_model" + display_name : "value_displayname" + service_account : "value_serviceaccount" + +custom_job: + name : "value_name" + job_id: "job_id" + region : "value_region" + parent : "value_parent" + +index: + name : "value_name" + region : "value_region" + parent : "value_parent" + description : "value_description" + display_name : "value_displayname" + metadata_schema_uri : "value_metadataschemauri" + index_update_method : "value_indexupdatemethod" + update_time : "value_updatetime" + create_time : "value_createtime" + etag : "value_etag" + +tensorboard: + name : "value_name" + region : "value_region" + parent : "value_parent" + update_time : 
"value_updatetime" + blob_storage_path_prefix : "value_blobstoragepathprefix" + etag : "value_etag" + create_time : "value_createtime" + display_name : "value_displayname" + description : "value_description" + +model: + name : "value_name" + region : "value_region" + parent : "value_parent" + update_time : "value_updatetime" + etag : "value_etag" + description : "value_description" + create_time : "value_createtime" + pipeline_job : "value_pipelinejob" + version_update_time : "value_versionupdatetime" + metadata_artifact : "value_metadataartifact" + metadata_schema_uri : "value_metadataschemauri" + version_id : "value_versionid" + artifact_uri : "value_artifacturi" + training_pipeline : "value_trainingpipeline" + display_name : "value_displayname" + version_create_time : "value_versioncreatetime" + version_description : "value_versiondescription" + +index_endpoint: + name : "value_name" + region : "value_region" + parent : "value_parent" + display_name : "value_displayname" + create_time : "value_createtime" + network : "value_network" + update_time : "value_updatetime" + public_endpoint_domain_name : "value_publicendpointdomainname" + etag : "value_etag" + description : "value_description" + +featurestores_entity_type: + name : "value_name" + region : "value_region" + parent : "value_parent" + description : "value_description" + create_time : "value_createtime" + etag : "value_etag" + update_time : "value_updatetime" + + +tensorboard_experiment_run: + name : "sklearn-2023-09-22-17-16-16-a25b0" + tensorboard: "1976367752880848896" + experiment: "autologging-experiment-fyc24zb2" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/tensorboards/1976367752880848896/experiments/autologging-experiment-fyc24zb2/runs/" + +studies_trial: + name : "1" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/studies/2975668570413/trials/" + study: "890385007008" + start_time : "value_starttime" + end_time : "value_endtime" 
+ infeasible_reason : "value_infeasiblereason" + client_id : "value_clientid" + custom_job : "value_customjob" + state : "value_state" + id : "value_id" + +hyperparameter_tuning_job: + name : "9200900561803673600" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/hyperparameterTuningJobs/" + state : "JOB_STATE_RUNNING" + end_time : "value_endtime" + update_time : "value_updatetime" + start_time : "value_starttime" + create_time : "value_createtime" + display_name : "inspec-hyper-test-hyperparameter-tuning-job" + +models_evaluation: + name : "value_name" + region : "value_region" + parent : "value_parent" + data_item_schema_uri : "value_dataitemschemauri" + metrics_schema_uri : "value_metricsschemauri" + create_time : "value_createtime" + annotation_schema_uri : "value_annotationschemauri" + display_name : "value_displayname" + +tensorboards_experiment: + name : "inspec-tensor-experiment" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/tensorboards/6346548241290493952/experiments/" + description : "value_description" + tensorboard: "6346548241290493952" + source : "value_source" + display_name : "inspec-tensor-experiment" + create_time : "value_createtime" + update_time : "value_updatetime" + etag : "value_etag" + +featurestore_entity_type_feature: + parent : "value_parent" + region : "value_region" + description : "value_description" + create_time : "value_createtime" + etag : "value_etag" + name : "value_name" + update_time : "value_updatetime" + value_type : "value_valuetype" + +dataset_data_item_annotation: + name: "1746031646898913280" + region: "us-central1" + dataset: "1044994542735982592" + dataItem: "75173735366921" + parent: "projects/165434197229/locations/us-central1/datasets/1044994542735982592/dataItems/75173735366921/annotations/" + +model_deployment_monitoring_job: + name : "4965515800912855040" + region : "us-central1" + parent : 
"projects/165434197229/locations/us-central1/modelDeploymentMonitoringJobs/" + state : "value_state" + analysis_instance_schema_uri : "value_analysisinstanceschemauri" + endpoint : "projects/165434197229/locations/us-central1/endpoints/5787303642054787072" + display_name : "churn" + schedule_state : "value_schedulestate" + predict_instance_schema_uri : "value_predictinstanceschemauri" + next_schedule_time : "value_nextscheduletime" + create_time : "value_createtime" + log_ttl : "value_logttl" + update_time : "value_updatetime" + +metadata_stores_metadata_schema: + name : "system-dag-execution-v0-0-1" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/metadataStores/default/metadataSchemas/" + metadataStore: "default" + schema_type : "EXECUTION_TYPE" + description : "value_description" + schema_version : "value_schemaversion" + create_time : "value_createtime" + schema : "value_schema" + +metadata_stores_execution: + name : "12528100122877440041" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/metadataStores/default/executions/" + metadataStore: "default" + create_time : "value_createtime" + schema_version : "value_schemaversion" + state : "value_state" + etag : "value_etag" + display_name : "endpoint-create-20230918054541-20230918054500491" + schema_title : "value_schematitle" + description : "value_description" + update_time : "value_updatetime" + +metadata_stores_context: + name : "autologging-experiment-w0apl7la-autologging-tf-experiment-w0apl7la" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/metadataStores/default/contexts/" + metadataStore: "default" + schema_title : "value_schematitle" + etag : "value_etag" + description : "value_description" + display_name : "autologging-tf-experiment-w0apl7la" + schema_version : "value_schemaversion" + create_time : "value_createtime" + update_time : "value_updatetime" + +metadata_stores_artifact: + name : "2811503570633325756" 
+ region : "us-central1" + parent : "projects/165434197229/locations/us-central1/metadataStores/default/artifacts/" + metadataStore: "default" + schema_version : "value_schemaversion" + display_name : "value_displayname" + etag : "value_etag" + update_time : "value_updatetime" + state : "value_state" + uri : "value_uri" + create_time : "value_createtime" + schema_title : "value_schematitle" + description : "value_description" + +datasets_saved_query: + parent : "projects/165434197229/locations/us-central1/datasets/1044994542735982592/savedQueries/" + region : "us-central1" + dataset: "1044994542735982592" + update_time : "value_updatetime" + problem_type : "value_problemtype" + name : "2236927819407949824" + create_time : "value_createtime" + etag : "value_etag" + display_name : "inspec-annotation-test" + annotation_filter : "value_annotationfilter" + +datasets_annotation_spec: + name : "5438527833485869056" + region : "us-central1" + dataset: "1044994542735982592" + parent: "projects/165434197229/locations/us-central1/datasets/1044994542735982592/annotationSpecs/" + display_name : "InSpec" + etag : "value_etag" + create_time : "value_createtime" + update_time : "value_updatetime" + +nas_jobs_nas_trial_detail: + name : "1" + region : "us-central1" + nasJob: "3217974009958236160" + parent : "projects/ppradhan/locations/us-central1/nasJobs/3217974009958236160/nasTrialDetails/" + parameters : "value_parameters" + +metadata_store: + name : "default" + region : "us-central1" + parent : "projects/165434197229/locations/us-central1/metadataStores/" + description : "value_description" + create_time : "value_createtime" + update_time : "value_updatetime" + +endpoint: + name : "value_name" + region : "value_region" + parent : "value_parent" + update_time : "value_updatetime" + model_deployment_monitoring_job : "value_modeldeploymentmonitoringjob" + description : "value_description" + network : "value_network" + display_name : "value_displayname" + etag : "value_etag" + 
create_time : "value_createtime" + +models_evaluations_slice: + name : "value_name" + region : "value_region" + parent : "value_parent" + create_time : "value_createtime" + metrics_schema_uri : "value_metricsschemauri" + +datasets_data_item: + parent : "value_parent" + region : "value_region" + update_time : "value_updatetime" + etag : "value_etag" + name : "value_name" + create_time : "value_createtime" + +study: + name : "value_name" + region : "value_region" + parent : "value_parent" + display_name : "value_displayname" + state : "value_state" + create_time : "value_createtime" + inactive_reason : "value_inactivereason" + +tensorboard_experiment_run_time_series_resource: + name : "value_name" + region : "value_region" + parent : "value_parent" + plugin_name : "value_pluginname" + plugin_data : "value_plugindata" + description : "value_description" + etag : "value_etag" + display_name : "value_displayname" + update_time : "value_updatetime" + create_time : "value_createtime" + value_type : "value_valuetype" + +project_region_cluster: + cluster_name : "value_clustername" + project_id : "value_projectid" + region : "value_region" + +project_location_image_version: + parent : "value_parent" + +vpn_gateway: + project : "value_project" + region : "value_region" + vpn_gateway : "value_vpngateway" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + network : "value_network" + self_link : "value_selflink" + label_fingerprint : "value_labelfingerprint" + stack_type : "value_stacktype" + +organization_envgroup: + name : "value_name" + parent : "value_parent" + last_modified_at : "value_lastmodifiedat" + state : "value_state" + created_at : "value_createdat" + +organization: + name : "value_name" + parent : "value_parent" + api_consumer_data_encryption_key_name : "value_apiconsumerdataencryptionkeyname" + runtime_database_encryption_key_name : 
"value_runtimedatabaseencryptionkeyname" + runtime_type : "value_runtimetype" + type : "value_type" + authorized_network : "value_authorizednetwork" + project_id : "value_projectid" + description : "value_description" + ca_certificate : "value_cacertificate" + subscription_type : "value_subscriptiontype" + customer_name : "value_customername" + created_at : "value_createdat" + last_modified_at : "value_lastmodifiedat" + subscription_plan : "value_subscriptionplan" + state : "value_state" + control_plane_encryption_key_name : "value_controlplaneencryptionkeyname" + analytics_region : "value_analyticsregion" + api_consumer_data_location : "value_apiconsumerdatalocation" + display_name : "value_displayname" + apigee_project_id : "value_apigeeprojectid" + expires_at : "value_expiresat" + billing_type : "value_billingtype" + +project_location_environment: + name : "value_name" + parent : "value_parent" + uuid : "value_uuid" + state : "value_state" + create_time : "value_createtime" + update_time : "value_updatetime" + + +folder_policy: + name : "value_name" + parent : "value_parent" + +organization_api_revision: + name : "value_name" + content_type : "value_contenttype" + data : "value_data" + +project_location_key_ring: + name : "value_name" + parent : "value_parent" + create_time : "value_createtime" + +project_secret: + name : "value_name" + +service_connection: + parent : "value_parent" + +project_service_account_key: + name : "value_name" + private_key_type : "value_privatekeytype" + key_algorithm : "value_keyalgorithm" + private_key_data : "value_privatekeydata" + public_key_data : "value_publickeydata" + valid_after_time : "value_validaftertime" + valid_before_time : "value_validbeforetime" + key_origin : "value_keyorigin" + key_type : "value_keytype" +machine_type: + machine_type : "value_machinetype" + project : "value_project" + zone : "value_zone" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + 
description : "value_description" + maximum_persistent_disks_size_gb : "value_maximumpersistentdiskssizegb" + self_link : "value_selflink" +license: + license : "value_license" + project : "value_project" + kind : "value_kind" + name : "value_name" + id : "value_id" + license_code : "value_licensecode" + creation_timestamp : "value_creationtimestamp" + description : "value_description" + self_link : "value_selflink" +machine_image: + machine_image : "value_machineimage" + project : "value_project" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + self_link : "value_selflink" + source_instance : "value_sourceinstance" + status : "value_status" + total_storage_bytes : "value_totalstoragebytes" +project_location_job: + job_id : "value_jobid" + location : "value_location" + project_id : "value_projectid" + id : "value_id" + name : "value_name" + type : "value_type" + steps_location : "value_stepslocation" + current_state : "value_currentstate" + current_state_time : "value_currentstatetime" + requested_state : "value_requestedstate" + create_time : "value_createtime" + replace_job_id : "value_replacejobid" + client_request_id : "value_clientrequestid" + replaced_by_job_id : "value_replacedbyjobid" + start_time : "value_starttime" + created_from_snapshot_id : "value_createdfromsnapshotid" +network_edge_security_service: + machine_type : "value_machinetype" + project : "value_project" + zone : "value_zone" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + region : "value_region" + fingerprint : "value_fingerprint" + security_policy : "value_securitypolicy" +network_attachment: + network_attachment : "value_networkattachment" + project : "value_project" + region : "value_region" + kind : "value_kind" + 
id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + connection_preference : "value_connectionpreference" + fingerprint : "value_fingerprint" + network : "value_network" +region_network_endpoint_group: + network_endpoint_group : "value_networkendpointgroup" + project : "value_project" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + self_link : "value_selflink" + name : "value_name" + description : "value_description" + network_endpoint_type : "value_networkendpointtype" + region : "value_region" + zone : "value_zone" + network : "value_network" + subnetwork : "value_subnetwork" + psc_target_service : "value_psctargetservice" +organization_api: + name : "value_name" + parent : "value_parent" + latest_revision_id : "value_latestrevisionid" + api_proxy_type : "value_apiproxytype" +project_location_repository: + name : "value_name" + parent : "value_parent" +organization_envgroup_attachment: + name : "value_name" + parent : "value_parent" + created_at : "value_createdat" + environment : "value_environment" + environment_group_id : "value_environmentgroupid" +node_type: + node_type : "value_nodetype" + project : "value_project" + zone : "value_zone" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + cpu_platform : "value_cpuplatform" + self_link : "value_selflink" +packet_mirroring: + packet_mirroring : "value_packetmirroring" + project : "value_project" + region : "value_region" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + name : "value_name" + description : "value_description" + enable : "value_enable" +region_disk_type: + disk_type : "value_disktype" + project : 
"value_project" + region : "value_region" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + valid_disk_size : "value_validdisksize" + zone : "value_zone" + self_link : "value_selflink" + default_disk_size_gb : "value_defaultdisksizegb" +target_vpn_gateway: + project : "value_project" + region : "value_region" + target_vpn_gateway : "value_targetvpngateway" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + network : "value_network" + status : "value_status" + self_link : "value_selflink" + label_fingerprint : "value_labelfingerprint" +region_ssl_policy: + commitment : "value_commitment" + project : "value_project" + region : "value_region" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + name : "value_name" + description : "value_description" + profile : "value_profile" + min_tls_version : "value_mintlsversion" + fingerprint : "value_fingerprint" +region_autoscaler: + autoscaler : "value_autoscaler" + project : "value_project" + region : "value_region" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + target : "value_target" + zone : "value_zone" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + status : "value_status" +resource_policy: + project : "value_project" + region : "value_region" + resource_policy : "value_resourcepolicy" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + description : "value_description" + name : "value_name" + status : "value_status" +region_security_policy: + securitypolicy : 
"value_securitypolicy" + +interconnect_attachment: + interconnect_attachment : "value_interconnectattachment" + project : "value_project" + region : "value_region" + kind : "value_kind" + description : "value_description" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + interconnect : "value_interconnect" + router : "value_router" + google_reference_id : "value_googlereferenceid" + operational_status : "value_operationalstatus" + cloud_router_ip_address : "value_cloudrouteripaddress" + customer_router_ip_address : "value_customerrouteripaddress" + type : "value_type" + pairing_key : "value_pairingkey" + edge_availability_domain : "value_edgeavailabilitydomain" + bandwidth : "value_bandwidth" + label_fingerprint : "value_labelfingerprint" + state : "value_state" + partner_asn : "value_partnerasn" + encryption : "value_encryption" + stack_type : "value_stacktype" + cloud_router_ipv6address : "value_cloudrouteripv6address" + customer_router_ipv6address : "value_customerrouteripv6address" + cloud_router_ipv6interface_id : "value_cloudrouteripv6interfaceid" + customer_router_ipv6interface_id : "value_customerrouteripv6interfaceid" + remote_service : "value_remoteservice" + +organization_constraint: + parent : "value_parent" + +target_ssl_proxy: + project : "value_project" + region : "value_region" + resource_policy : "value_resourcepolicy" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + self_link : "value_selflink" + service : "value_service" + certificate_map : "value_certificatemap" + proxy_header : "value_proxyheader" + ssl_policy : "value_sslpolicy" + +project_constraint: + parent : "value_parent" + + +target_grpc_proxy: + project : "value_project" + target_grpc_proxy : "value_targetgrpcproxy" + kind : "value_kind" + id : "value_id" + 
creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + url_map : "value_urlmap" + fingerprint : "value_fingerprint" +region_notification_endpoint: + project : "value_project" + public_advertised_prefix : "value_publicadvertisedprefix" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + self_link : "value_selflink" + region : "value_region" +public_advertised_prefix: + project : "value_project" + public_advertised_prefix : "value_publicadvertisedprefix" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + name : "value_name" + description : "value_description" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + ip_cidr_range : "value_ipcidrrange" + dns_verification_ip : "value_dnsverificationip" + shared_secret : "value_sharedsecret" + status : "value_status" + pdp_scope : "value_pdpscope" + fingerprint : "value_fingerprint" + byoip_api_version : "value_byoipapiversion" + +folder_constraint: + parent : "value_parent" + + +reservation: + project : "value_project" + reservation : "value_reservation" + zone : "value_zone" + kind : "value_kind" + id : "value_id" + creation_timestamp : "value_creationtimestamp" + self_link : "value_selflink" + self_link_with_id : "value_selflinkwithid" + description : "value_description" + name : "value_name" + commitment : "value_commitment" + status : "value_status" + delete_at_time : "value_deleteattime" + +project_region_autoscaling_policy: + name : "value_name" + parent : "value_parent" + id : "value_id" + + + +project_instance_cluster: + name : "value_name" + parent : "value_parent" + location : "value_location" + state : "value_state" + default_storage_type : "value_defaultstoragetype" + +project_location_federation: + name : "value_name" + parent 
: "value_parent" + create_time : "value_createtime" + update_time : "value_updatetime" + version : "value_version" + endpoint_uri : "value_endpointuri" + state : "value_state" + state_message : "value_statemessage" + uid : "value_uid" + +project_location_service: + name : "value_name" + parent : "value_parent" + create_time : "value_createtime" + update_time : "value_updatetime" + network : "value_network" + endpoint_uri : "value_endpointuri" + state : "value_state" + state_message : "value_statemessage" + artifact_gcs_uri : "value_artifactgcsuri" + tier : "value_tier" + uid : "value_uid" + release_channel : "value_releasechannel" + database_type : "value_databasetype" + +project_region_job: + job_id : "value_jobid" + project_id : "value_projectid" + region : "value_region" + driver_output_resource_uri : "value_driveroutputresourceuri" + driver_control_files_uri : "value_drivercontrolfilesuri" + job_uuid : "value_jobuuid" + +project_location_session: + name : "value_name" + parent : "value_parent" + uuid : "value_uuid" + create_time : "value_createtime" + state : "value_state" + state_message : "value_statemessage" + state_time : "value_statetime" + creator : "value_creator" + user : "value_user" + session_template : "value_sessiontemplate" + +project_location_workflow_template: + project : "value_project" + reservation : "value_reservation" + zone : "value_zone" + id : "value_id" + name : "value_name" + create_time : "value_createtime" + update_time : "value_updatetime" + dag_timeout : "value_dagtimeout" + +project_instance_app_profile: + name : "value_name" + parent : "value_parent" + etag : "value_etag" + description : "value_description" + priority : "value_priority" + +project_location_service_backup: + name : "value_name" + parent : "value_parent" + create_time : "value_createtime" + end_time : "value_endtime" + state : "value_state" + description : "value_description" +project_instance_cluster_backup: + name : "value_name" + parent : "value_parent" + 
source_table : "value_sourcetable" + source_backup : "value_sourcebackup" + expire_time : "value_expiretime" + start_time : "value_starttime" + end_time : "value_endtime" + size_bytes : "value_sizebytes" + state : "value_state" + +organization_instance_attachment: + name : "test_org" + parent : "value_parent" + environment : "value_environment" + created_at : "value_createdat" + +batch: + name : "batch1" + parent : "value_parent" + uuid : "value_uuid" + create_time : "value_createtime" + state : "value_state" + state_message : "value_statemessage" + state_time : "value_statetime" + creator : "value_creator" + operation : "value_operation" + +project_group: + name : "value_name" + display_name : "value_displayname" + parent_name : "value_parentname" + filter : "value_filter" \ No newline at end of file diff --git a/mmv1/templates/terraform/custom_flatten/cloud_scheduler_paused.go.erb b/mmv1/templates/terraform/custom_flatten/cloud_scheduler_paused.go.erb deleted file mode 100644 index e1525ae87..000000000 --- a/mmv1/templates/terraform/custom_flatten/cloud_scheduler_paused.go.erb +++ /dev/null @@ -1,10 +0,0 @@ -func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *Config) interface{} { - state := d.Get("state"); - if state == "PAUSED" { - return true - } - if state == "ENABLED" { - return false - } - return false // Job has an error state that's not paused or enabled -} diff --git a/mmv1/templates/terraform/custom_import/kms_key_ring_import_job.go.erb b/mmv1/templates/terraform/custom_import/kms_key_ring_import_job.go.erb index efa779881..17dd29691 100644 --- a/mmv1/templates/terraform/custom_import/kms_key_ring_import_job.go.erb +++ b/mmv1/templates/terraform/custom_import/kms_key_ring_import_job.go.erb @@ -21,4 +21,4 @@ if err := d.Set("import_job_id", stringParts[5]); err != nil { return nil, fmt.Errorf("Error setting import_job_id: %s", err) } - return []*schema.ResourceData{d}, nil + return 
[]*schema.ResourceData{d}, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/encoders/cloud_scheduler.go.erb b/mmv1/templates/terraform/encoders/cloud_scheduler.go.erb deleted file mode 100644 index 67d57dbee..000000000 --- a/mmv1/templates/terraform/encoders/cloud_scheduler.go.erb +++ /dev/null @@ -1,2 +0,0 @@ -delete(obj, "paused") // Field doesn't exist in API -return obj, nil diff --git a/mmv1/templates/terraform/examples/data_fusion_instance_basic.tf.erb b/mmv1/templates/terraform/examples/data_fusion_instance_basic.tf.erb index 99e865279..31f3d3706 100644 --- a/mmv1/templates/terraform/examples/data_fusion_instance_basic.tf.erb +++ b/mmv1/templates/terraform/examples/data_fusion_instance_basic.tf.erb @@ -1,4 +1,4 @@ -resource "google_data_fusion_instance" "<%= ctx[:primary_resource_id] %>" { +resource "google_datafusion_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]["instance_name"] %>" region = "us-central1" type = "BASIC" diff --git a/mmv1/templates/terraform/examples/data_fusion_instance_full.tf.erb b/mmv1/templates/terraform/examples/data_fusion_instance_full.tf.erb index 93f3c64ed..3dcc20962 100644 --- a/mmv1/templates/terraform/examples/data_fusion_instance_full.tf.erb +++ b/mmv1/templates/terraform/examples/data_fusion_instance_full.tf.erb @@ -1,4 +1,4 @@ -resource "google_data_fusion_instance" "<%= ctx[:primary_resource_id] %>" { +resource "google_datafusion_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]["instance_name"] %>" description = "My Data Fusion instance" region = "us-central1" diff --git a/mmv1/templates/terraform/examples/scheduler_job_paused.tf.erb b/mmv1/templates/terraform/examples/scheduler_job_paused.tf.erb deleted file mode 100644 index 0eb6f062c..000000000 --- a/mmv1/templates/terraform/examples/scheduler_job_paused.tf.erb +++ /dev/null @@ -1,18 +0,0 @@ -resource "google_cloud_scheduler_job" "job" { - paused = true - name = "<%= ctx[:vars]['job_name'] %>" - description 
= "test http job with updated fields" - schedule = "*/8 * * * *" - time_zone = "America/New_York" - attempt_deadline = "320s" - - retry_config { - retry_count = 1 - } - - http_target { - http_method = "POST" - uri = "https://example.com/ping" - body = base64encode("{\"foo\":\"bar\"}") - } -} diff --git a/mmv1/templates/terraform/post_create/cloud_scheduler.go.erb b/mmv1/templates/terraform/post_create/cloud_scheduler.go.erb index ec9cd75b2..9bef675d4 100644 --- a/mmv1/templates/terraform/post_create/cloud_scheduler.go.erb +++ b/mmv1/templates/terraform/post_create/cloud_scheduler.go.erb @@ -1,9 +1,4 @@ endpoint := "resume" // Default to enabled -logSuccessMsg := "Job state has been set to ENABLED" -if paused, pausedOk := d.GetOk("paused"); pausedOk && paused.(bool) { - endpoint = "pause" - logSuccessMsg = "Job state has been set to PAUSED" -} linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) url, err = replaceVars(d, config, linkTmpl) diff --git a/mmv1/templates/terraform/post_update/cloud_scheduler.go.erb b/mmv1/templates/terraform/post_update/cloud_scheduler.go.erb deleted file mode 100644 index 27196370d..000000000 --- a/mmv1/templates/terraform/post_update/cloud_scheduler.go.erb +++ /dev/null @@ -1,25 +0,0 @@ -if d.HasChange("paused") { - endpoint := "resume" // Default to enabled - logSuccessMsg := "Job state has been set to ENABLED" - if paused, pausedOk := d.GetOk("paused"); pausedOk { - if paused.(bool) { - endpoint = "pause" - logSuccessMsg = "Job state has been set to PAUSED" - } - } - - linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) - url, err = replaceVars(d, config, linkTmpl) - if err != nil { - return err - } - - emptyReqBody := make(map[string]interface{}) - - _, err = sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, emptyReqBody, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - 
return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) - } - - log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) -} diff --git a/mmv1/third_party/terraform/tests/resource_cloud_scheduler_job_test.go b/mmv1/third_party/terraform/tests/resource_cloud_scheduler_job_test.go index d226fb625..a21b3b135 100644 --- a/mmv1/third_party/terraform/tests/resource_cloud_scheduler_job_test.go +++ b/mmv1/third_party/terraform/tests/resource_cloud_scheduler_job_test.go @@ -95,63 +95,9 @@ func TestCloudScheduler_FlattenHttpHeaders(t *testing.T) { } } -func TestAccCloudSchedulerJob_schedulerPausedExample(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": randString(t, 10), - } - - vcrTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudSchedulerJobDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccCloudSchedulerJob_schedulerPaused(context), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "paused", "true"), - resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "state", "PAUSED"), - ), - }, - { - Config: testAccCloudSchedulerJob_schedulerUnPaused(context), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "paused", "false"), - resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "state", "ENABLED"), - ), - }, - }, - }) -} - -func testAccCloudSchedulerJob_schedulerPaused(context map[string]interface{}) string { - return Nprintf(` -resource "google_cloud_scheduler_job" "job" { - paused = true - name = "tf-test-test-job%{random_suffix}" - description = "test http job with updated fields" - schedule = "*/8 * * * *" - time_zone = "America/New_York" - attempt_deadline = "320s" - - retry_config { - retry_count = 1 - } - - http_target { - http_method = "POST" - uri = 
"https://example.com/ping" - body = base64encode("{\"foo\":\"bar\"}") - } -} -`, context) -} - -func testAccCloudSchedulerJob_schedulerUnPaused(context map[string]interface{}) string { +func testAccCloudSchedulerJob_scheduler(context map[string]interface{}) string { return Nprintf(` resource "google_cloud_scheduler_job" "job" { - paused = false # Has been flipped name = "tf-test-test-job%{random_suffix}" description = "test http job with updated fields" schedule = "*/8 * * * *" diff --git a/mmv1/tools/doc-parser/package-lock.json b/mmv1/tools/doc-parser/package-lock.json index 94e829de2..b39ce8b23 100644 --- a/mmv1/tools/doc-parser/package-lock.json +++ b/mmv1/tools/doc-parser/package-lock.json @@ -7,10 +7,10 @@ "": { "name": "scrape-mm-yaml", "version": "1.0.0", - "license": "MIT", + "license": "Apache License Version 2.0", "dependencies": { "jsdom": "^16.6.0", - "node-fetch": "^2.6.1", + "node-fetch": "^2.6.7", "turndown": "^7.1.1" } }, @@ -396,11 +396,41 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "node_modules/node-fetch": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", - "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==", + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, "engines": { "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-fetch/node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": 
"sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/node-fetch/node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/node-fetch/node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" } }, "node_modules/nwsapi": { @@ -450,6 +480,16 @@ "node": ">=6" } }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -481,13 +521,14 @@ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==" }, "node_modules/tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": "sha512-tHdtEpQCMrc1YLrMaqXXcj6AxhYi/xgit6mZu1+EDWUn+qhUf8wMQoFIy9NXuq23zAwtcB0t/MjACGR18pcRbg==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": 
"sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", "dependencies": { "psl": "^1.1.33", "punycode": "^2.1.1", - "universalify": "^0.1.2" + "universalify": "^0.2.0", + "url-parse": "^1.5.3" }, "engines": { "node": ">=6" @@ -524,13 +565,22 @@ } }, "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", "engines": { "node": ">= 4.0.0" } }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, "node_modules/w3c-hr-time": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz", @@ -585,17 +635,17 @@ } }, "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", "engines": { "node": ">=0.10.0" } }, "node_modules/ws": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.3.tgz", - "integrity": "sha512-kQ/dHIzuLrS6Je9+uv81ueZomEwH0qVYstcAQ4/Z93K8zeko9gtAbttJWzoC5ukqXY1PpoouV3+VSOqEAFt5wg==", + 
"version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", "engines": { "node": ">=8.3.0" }, @@ -908,9 +958,33 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "node-fetch": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", - "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==" + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "requires": { + "whatwg-url": "^5.0.0" + }, + "dependencies": { + "tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "requires": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + } + } }, "nwsapi": { "version": "2.2.0", @@ -950,6 +1024,16 @@ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, + "querystringify": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" + }, + "requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, "safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -975,13 +1059,14 @@ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==" }, "tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": "sha512-tHdtEpQCMrc1YLrMaqXXcj6AxhYi/xgit6mZu1+EDWUn+qhUf8wMQoFIy9NXuq23zAwtcB0t/MjACGR18pcRbg==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", "requires": { "psl": "^1.1.33", "punycode": "^2.1.1", - "universalify": "^0.1.2" + "universalify": "^0.2.0", + "url-parse": "^1.5.3" } }, "tr46": { @@ -1009,9 +1094,18 @@ } }, "universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==" + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==" + }, + "url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": 
"sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "requires": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } }, "w3c-hr-time": { "version": "1.0.2", @@ -1058,14 +1152,14 @@ } }, "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==" + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==" }, "ws": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.3.tgz", - "integrity": "sha512-kQ/dHIzuLrS6Je9+uv81ueZomEwH0qVYstcAQ4/Z93K8zeko9gtAbttJWzoC5ukqXY1PpoouV3+VSOqEAFt5wg==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", "requires": {} }, "xml-name-validator": { diff --git a/mmv1/tools/doc-parser/package.json b/mmv1/tools/doc-parser/package.json index aa775fe5b..12092774e 100644 --- a/mmv1/tools/doc-parser/package.json +++ b/mmv1/tools/doc-parser/package.json @@ -14,7 +14,7 @@ "license": "Apache License Version 2.0", "dependencies": { "jsdom": "^16.6.0", - "node-fetch": "^2.6.1", + "node-fetch": "^2.6.7", "turndown": "^7.1.1" } }