diff --git a/docs/dyn/aiplatform_v1.projects.locations.batchPredictionJobs.html b/docs/dyn/aiplatform_v1.projects.locations.batchPredictionJobs.html index 6f9feb90447..312da3ee01a 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.batchPredictionJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.batchPredictionJobs.html @@ -288,10 +288,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. @@ -558,10 +558,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. @@ -870,10 +870,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. @@ -1153,10 +1153,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. diff --git a/docs/dyn/aiplatform_v1.projects.locations.customJobs.html b/docs/dyn/aiplatform_v1.projects.locations.customJobs.html index dc6d6de35ef..4024e0394e4 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.customJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.customJobs.html @@ -163,6 +163,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -273,6 +276,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -425,6 +431,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -548,6 +557,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. diff --git a/docs/dyn/aiplatform_v1.projects.locations.datasets.html b/docs/dyn/aiplatform_v1.projects.locations.datasets.html index 7b1b47f5c2b..94da36599f7 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.datasets.html +++ b/docs/dyn/aiplatform_v1.projects.locations.datasets.html @@ -255,7 +255,7 @@

Method Details

{ # Request message for DatasetService.ExportData. "exportConfig": { # Describes what part of the Dataset is to be exported, the destination of the export and how to export. # Required. The desired output location. - "annotationSchemaUri": "A String", # Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. + "annotationSchemaUri": "A String", # The Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. 
When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. "annotationsFilter": "A String", # An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations. "exportUse": "A String", # Indicates the usage of the exported files. "filterSplit": { # Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets. # Split based on the provided filters for each set. @@ -271,7 +271,7 @@

Method Details

"gcsDestination": { # The Google Cloud Storage location where the output is to be written to. # The Google Cloud Storage location where the output is to be written to. In the given directory a new directory will be created with name: `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. Inside that directory, annotations with the same schema will be grouped into sub directories which are named with the corresponding annotations' schema title. Inside these sub directories, a schema.yaml will be created to describe the output format. "outputUriPrefix": "A String", # Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. }, - "savedQueryId": "A String", # Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. + "savedQueryId": "A String", # The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. 
Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. }, } diff --git a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html index f4a31e739c8..bd4b8f1f8df 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html @@ -187,28 +187,16 @@

Method Details

{ # Request message for PredictionService.CountTokens. "contents": [ # Required. Input content. - { # A single turn in a conversation with the model. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, - "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. - "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. - "a_key": "", # Properties of the object. - }, - "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
- }, - "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. - "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. - "response": { # Required. The function response in JSON object format. - "a_key": "", # Properties of the object. - }, - }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -217,7 +205,7 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, ], "instances": [ # Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. diff --git a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html index c66ee00d780..0791e580cc4 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html @@ -264,6 +264,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -509,6 +512,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -796,6 +802,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -1054,6 +1063,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. diff --git a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html index c66feb0e0f8..eff4ab06c3b 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html @@ -219,6 +219,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -297,6 +300,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -461,6 +467,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -539,6 +548,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -745,6 +757,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -823,6 +838,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -1000,6 +1018,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. @@ -1078,6 +1099,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. diff --git a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html index 1da757361b6..67901a61f41 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html @@ -146,28 +146,16 @@

Method Details

{ # Request message for PredictionService.CountTokens. "contents": [ # Required. Input content. - { # A single turn in a conversation with the model. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, - "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. - "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. - "a_key": "", # Properties of the object. - }, - "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
- }, - "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. - "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. - "response": { # Required. The function response in JSON object format. - "a_key": "", # Properties of the object. - }, - }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -176,7 +164,7 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, ], "instances": [ # Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. diff --git a/docs/dyn/aiplatform_v1.publishers.models.html b/docs/dyn/aiplatform_v1.publishers.models.html index b4b0f841400..e71d75c72e8 100644 --- a/docs/dyn/aiplatform_v1.publishers.models.html +++ b/docs/dyn/aiplatform_v1.publishers.models.html @@ -124,8 +124,10 @@

Method Details

"createApplication": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Create application using the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -203,14 +205,17 @@

Method Details

"name": "A String", # Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. }, "modelDisplayName": "A String", # Optional. Default model display name. + "publicArtifactUri": "A String", # Optional. The signed URI for ephemeral Cloud Storage access to model artifact. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` "title": "A String", # Required. The title of the regional resource reference. }, "openEvaluationPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open evaluation pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -218,8 +223,10 @@

Method Details

"openFineTuningPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open fine-tuning pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -227,8 +234,10 @@

Method Details

"openGenerationAiStudio": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open in Generation AI Studio. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -236,8 +245,10 @@

Method Details

"openGenie": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open Genie / Playground. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -245,8 +256,10 @@

Method Details

"openNotebook": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open notebook of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -254,8 +267,10 @@

Method Details

"openPromptTuningPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open prompt-tuning pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -263,8 +278,10 @@

Method Details

"requestAccess": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Request for access. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -280,6 +297,7 @@

Method Details

}, }, "versionId": "A String", # Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation. + "versionState": "A String", # Optional. Indicates the state of the model version. } diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.batchPredictionJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.batchPredictionJobs.html index 67166d351e4..a6cc370df8e 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.batchPredictionJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.batchPredictionJobs.html @@ -293,10 +293,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. @@ -693,10 +693,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. @@ -1135,10 +1135,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. @@ -1548,10 +1548,10 @@

Method Details

"instancesFormat": "A String", # Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. }, "instanceConfig": { # Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. # Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. - "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "excludedFields": [ # Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], - "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + "includedFields": [ # Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. "A String", ], "instanceType": "A String", # The format of the instance that the Model accepts. 
Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": }`, where `` is the Base64-encoded string of the content of the file. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html index e9d288d8626..2c923abb916 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html @@ -163,6 +163,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -274,6 +277,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -427,6 +433,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -551,6 +560,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html index f3e4bb9e54e..2278e719dba 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html @@ -199,12 +199,12 @@

Method Details

{ # Request message for PredictionService.CountTokens. "contents": [ # Required. Input content. - { # A single turn in a conversation with the model. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. @@ -218,9 +218,9 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -229,7 +229,7 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, ], "instances": [ # Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. @@ -2546,13 +2546,13 @@

Method Details

The object takes the form of: { # Request message for [PredictionService.GenerateContent]. - "contents": [ # Required. Input content. - { # A single turn in a conversation with the model. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + "contents": [ # Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. + { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. "args": { # Optional. Required. The function parameters and values in JSON object format. 
See [FunctionDeclaration.parameters] for parameter details. @@ -2566,9 +2566,9 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -2577,20 +2577,13 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, ], "endpoint": "A String", # Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. - "echo": True or False, # Optional. Echo. - "frequencyPenalty": 3.14, # Optional. Frequency penalties. - "logitBias": { # Optional. Logit bias. - "a_key": 3.14, - }, - "logprobs": 42, # Optional. Logit probabilities. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. - "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -2604,13 +2597,13 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], - "tools": [ # Optional. Tools that the model may use to generate response. - { # Tool details that the model may use to generate response. + "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. The only supported tool is currently `Function` + { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. "functionDeclarations": [ # Optional. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. - { # Function declaration details. + { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. "name": "A String", # Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
- "parameters": { # Represents a select subset of an OpenAPI 3.0 Schema object. Schema is used to define the format of input/output data. More fields may be added in the future as needed. https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject # Optional. Describes the parameters to this function. Reflects the Open API 3.03 Parameter Object string Key: the name of the parameter. Parameter names are case sensitive. For function with no parameters, this can be left unset. + "parameters": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1 "description": "A String", # Optional. The description of the data. "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} "A String", @@ -2643,7 +2636,7 @@

Method Details

{ # Response message for [PredictionService.GenerateContent]. "candidates": [ # Output only. Generated candidates. - { # Generated candidate. + { # A response candidate generated from the model. "citationMetadata": { # A collection of source attributions for a piece of content. # Output only. Source attribution of the generated content. "citations": [ # Output only. List of citations. { # Source attributions for content. @@ -2660,12 +2653,12 @@

Method Details

}, ], }, - "content": { # A single turn in a conversation with the model. # Output only. Content parts of the candidate. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + "content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. @@ -2679,9 +2672,9 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -2690,12 +2683,12 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, - "finishMessage": "A String", # Output only. A string that describes the filtering behavior in more detail. Only filled when reason is set. - "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. + "finishMessage": "A String", # Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. + "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. "index": 42, # Output only. Index of the candidate. - "safetyRatings": [ # Output only. Safety ratings of the generated content. + "safetyRatings": [ # Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. { # Safety rating corresponding to the generated content. "blocked": True or False, # Output only. Indicates whether the content was filtered out because of this rating. "category": "A String", # Output only. Harm category. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html index ee859e575d5..2d9ce0d3080 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html @@ -277,6 +277,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -536,6 +539,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -837,6 +843,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -1109,6 +1118,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html index 79c50f7623a..f39f32c153c 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html @@ -219,6 +219,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -298,6 +301,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -463,6 +469,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -542,6 +551,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -749,6 +761,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -828,6 +843,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -1006,6 +1024,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations @@ -1085,6 +1106,9 @@

Method Details

"enableWebAccess": True or False, # Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` + "models": [ # Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + "A String", + ], "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. 
To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html index 691786e9d39..971544ec693 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html @@ -83,6 +83,9 @@

Instance Methods

countTokens(endpoint, body=None, x__xgafv=None)

Perform a token counting.

+

+ getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

+

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

predict(endpoint, body=None, x__xgafv=None)

Perform an online prediction.

@@ -149,12 +152,12 @@

Method Details

{ # Request message for PredictionService.CountTokens. "contents": [ # Required. Input content. - { # A single turn in a conversation with the model. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. @@ -168,9 +171,9 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -179,7 +182,7 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, ], "instances": [ # Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. @@ -202,6 +205,41 @@

Method Details

} +
+ getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None) +
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
+
+Args:
+  resource: string, REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
+  options_requestedPolicyVersion: integer, Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM 
documentation](https://cloud.google.com/iam/docs/).
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
+
predict(endpoint, body=None, x__xgafv=None)
Perform an online prediction.
@@ -476,13 +514,13 @@ 

Method Details

The object takes the form of: { # Request message for [PredictionService.GenerateContent]. - "contents": [ # Required. Input content. - { # A single turn in a conversation with the model. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + "contents": [ # Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. + { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. "args": { # Optional. Required. The function parameters and values in JSON object format. 
See [FunctionDeclaration.parameters] for parameter details. @@ -496,9 +534,9 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -507,20 +545,13 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, ], "endpoint": "A String", # Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. - "echo": True or False, # Optional. Echo. - "frequencyPenalty": 3.14, # Optional. Frequency penalties. - "logitBias": { # Optional. Logit bias. - "a_key": 3.14, - }, - "logprobs": 42, # Optional. Logit probabilities. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. - "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -534,13 +565,13 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], - "tools": [ # Optional. Tools that the model may use to generate response. - { # Tool details that the model may use to generate response. + "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. The only supported tool is currently `Function` + { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. "functionDeclarations": [ # Optional. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. - { # Function declaration details. + { # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. "description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. "name": "A String", # Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
- "parameters": { # Represents a select subset of an OpenAPI 3.0 Schema object. Schema is used to define the format of input/output data. More fields may be added in the future as needed. https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject # Optional. Describes the parameters to this function. Reflects the Open API 3.03 Parameter Object string Key: the name of the parameter. Parameter names are case sensitive. For function with no parameters, this can be left unset. + "parameters": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1 "description": "A String", # Optional. The description of the data. "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} "A String", @@ -573,7 +604,7 @@

Method Details

{ # Response message for [PredictionService.GenerateContent]. "candidates": [ # Output only. Generated candidates. - { # Generated candidate. + { # A response candidate generated from the model. "citationMetadata": { # A collection of source attributions for a piece of content. # Output only. Source attribution of the generated content. "citations": [ # Output only. List of citations. { # Source attributions for content. @@ -590,12 +621,12 @@

Method Details

}, ], }, - "content": { # A single turn in a conversation with the model. # Output only. Content parts of the candidate. - "parts": [ # Required. Ordered parts that make up a message. Parts may have different MIME types. - { # Content part. + "content": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Output only. Content parts of the candidate. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. "fileData": { # URI based data. # Optional. URI based data. "fileUri": "A String", # Required. URI. - "mimeType": "A String", # Required. Mime type of the data. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. @@ -609,9 +640,9 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Content blob. # Optional. Inlined bytes data. - "data": "A String", # Required. Data. - "mimeType": "A String", # Required. Mime type of the data. + "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes for media formats. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. @@ -620,12 +651,12 @@

Method Details

}, }, ], - "role": "A String", # Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model. + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, - "finishMessage": "A String", # Output only. A string that describes the filtering behavior in more detail. Only filled when reason is set. - "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. + "finishMessage": "A String", # Output only. Describes the reason the model stopped generating tokens in more detail. This is only filled when `finish_reason` is set. + "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. "index": 42, # Output only. Index of the candidate. - "safetyRatings": [ # Output only. Safety ratings of the generated content. + "safetyRatings": [ # Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. { # Safety rating corresponding to the generated content. "blocked": True or False, # Output only. Indicates whether the content was filtered out because of this rating. "category": "A String", # Output only. Harm category. diff --git a/docs/dyn/aiplatform_v1beta1.publishers.models.html b/docs/dyn/aiplatform_v1beta1.publishers.models.html index b889bdd6c29..232392ad614 100644 --- a/docs/dyn/aiplatform_v1beta1.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.publishers.models.html @@ -123,8 +123,10 @@

Method Details

"parent": { # The information about the parent of a model. # Optional. The parent that this model was customized from. E.g., Vision API, Natural Language API, LaMDA, T5, etc. Foundation models don't have parents. "displayName": "A String", # Required. The display name of the parent. E.g., LaMDA, T5, Vision API, Natural Language API. "reference": { # Reference to a resource. # Optional. The Google Cloud resource name or the URI reference. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "predictSchemata": { # Contains the schemata used in Model's predictions and explanations via PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. # Optional. The schemata that describes formats of the PublisherModel's predictions and explanations as given and returned via PredictionService.Predict. @@ -137,8 +139,10 @@

Method Details

"createApplication": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Create application using the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -216,14 +220,17 @@

Method Details

"name": "A String", # Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. }, "modelDisplayName": "A String", # Optional. Default model display name. + "publicArtifactUri": "A String", # Optional. The signed URI for ephemeral Cloud Storage access to model artifact. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` "title": "A String", # Required. The title of the regional resource reference. }, "openEvaluationPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open evaluation pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -231,8 +238,10 @@

Method Details

"openFineTuningPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open fine-tuning pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -240,8 +249,10 @@

Method Details

"openGenerationAiStudio": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open in Generation AI Studio. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -249,8 +260,10 @@

Method Details

"openGenie": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open Genie / Playground. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -258,8 +271,10 @@

Method Details

"openNotebook": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open notebook of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -267,8 +282,10 @@

Method Details

"openPromptTuningPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open prompt-tuning pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -276,8 +293,10 @@

Method Details

"requestAccess": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Request for access. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -293,6 +312,7 @@

Method Details

}, }, "versionId": "A String", # Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation. + "versionState": "A String", # Optional. Indicates the state of the model version. }
@@ -334,8 +354,10 @@

Method Details

"parent": { # The information about the parent of a model. # Optional. The parent that this model was customized from. E.g., Vision API, Natural Language API, LaMDA, T5, etc. Foundation models don't have parents. "displayName": "A String", # Required. The display name of the parent. E.g., LaMDA, T5, Vision API, Natural Language API. "reference": { # Reference to a resource. # Optional. The Google Cloud resource name or the URI reference. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "predictSchemata": { # Contains the schemata used in Model's predictions and explanations via PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. # Optional. The schemata that describes formats of the PublisherModel's predictions and explanations as given and returned via PredictionService.Predict. @@ -348,8 +370,10 @@

Method Details

"createApplication": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Create application using the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -427,14 +451,17 @@

Method Details

"name": "A String", # Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. }, "modelDisplayName": "A String", # Optional. Default model display name. + "publicArtifactUri": "A String", # Optional. The signed URI for ephemeral Cloud Storage access to model artifact. "sharedResources": "A String", # The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` "title": "A String", # Required. The title of the regional resource reference. }, "openEvaluationPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open evaluation pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -442,8 +469,10 @@

Method Details

"openFineTuningPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open fine-tuning pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -451,8 +480,10 @@

Method Details

"openGenerationAiStudio": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open in Generation AI Studio. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -460,8 +491,10 @@

Method Details

"openGenie": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open Genie / Playground. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -469,8 +502,10 @@

Method Details

"openNotebook": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open notebook of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -478,8 +513,10 @@

Method Details

"openPromptTuningPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open prompt-tuning pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -487,8 +524,10 @@

Method Details

"requestAccess": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Request for access. "references": { # Required. "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. "resourceName": "A String", # The resource name of the Google Cloud resource. "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. }, }, "title": "A String", # Required. The title of the regional resource reference. @@ -504,6 +543,7 @@

Method Details

}, }, "versionId": "A String", # Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation. + "versionState": "A String", # Optional. Indicates the state of the model version. }, ], } diff --git a/docs/dyn/alloydb_v1.projects.locations.backups.html b/docs/dyn/alloydb_v1.projects.locations.backups.html index c5ddb3c2dbf..6cccaa0a7bf 100644 --- a/docs/dyn/alloydb_v1.projects.locations.backups.html +++ b/docs/dyn/alloydb_v1.projects.locations.backups.html @@ -141,6 +141,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. @@ -263,6 +264,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. @@ -322,6 +324,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. @@ -390,6 +393,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. diff --git a/docs/dyn/alloydb_v1.projects.locations.clusters.html b/docs/dyn/alloydb_v1.projects.locations.clusters.html index 1fcae714c8c..a2488bf2c80 100644 --- a/docs/dyn/alloydb_v1.projects.locations.clusters.html +++ b/docs/dyn/alloydb_v1.projects.locations.clusters.html @@ -229,6 +229,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -379,6 +380,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -610,6 +612,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -740,6 +743,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -879,6 +883,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -1079,6 +1084,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, diff --git a/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html b/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html index 18a3b2b141b..d52e4e364c5 100644 --- a/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html +++ b/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html @@ -172,6 +172,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updateTime": "A String", # Output only. Update time stamp @@ -271,6 +272,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updateTime": "A String", # Output only. Update time stamp @@ -461,6 +463,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updateTime": "A String", # Output only. Update time stamp @@ -606,6 +609,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updateTime": "A String", # Output only. Update time stamp @@ -694,6 +698,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updateTime": "A String", # Output only. Update time stamp diff --git a/docs/dyn/alloydb_v1alpha.projects.locations.backups.html b/docs/dyn/alloydb_v1alpha.projects.locations.backups.html index 3f5b7f7e890..79806e1f957 100644 --- a/docs/dyn/alloydb_v1alpha.projects.locations.backups.html +++ b/docs/dyn/alloydb_v1alpha.projects.locations.backups.html @@ -141,6 +141,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. @@ -264,6 +265,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. @@ -324,6 +326,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. @@ -393,6 +396,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. diff --git a/docs/dyn/alloydb_v1alpha.projects.locations.clusters.html b/docs/dyn/alloydb_v1alpha.projects.locations.clusters.html index 02590a949cf..6780bc49dc5 100644 --- a/docs/dyn/alloydb_v1alpha.projects.locations.clusters.html +++ b/docs/dyn/alloydb_v1alpha.projects.locations.clusters.html @@ -232,6 +232,7 @@

Method Details

"pscEnabled": True or False, # Optional. Create an instance that allows connections from Private Service Connect endpoints to the instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} @@ -386,6 +387,7 @@

Method Details

"pscEnabled": True or False, # Optional. Create an instance that allows connections from Private Service Connect endpoints to the instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} @@ -623,6 +625,7 @@

Method Details

"pscEnabled": True or False, # Optional. Create an instance that allows connections from Private Service Connect endpoints to the instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} @@ -757,6 +760,7 @@

Method Details

"pscEnabled": True or False, # Optional. Create an instance that allows connections from Private Service Connect endpoints to the instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} @@ -900,6 +904,7 @@

Method Details

"pscEnabled": True or False, # Optional. Create an instance that allows connections from Private Service Connect endpoints to the instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} @@ -1104,6 +1109,7 @@

Method Details

"pscEnabled": True or False, # Optional. Create an instance that allows connections from Private Service Connect endpoints to the instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} diff --git a/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html b/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html index 460ecf394bd..4da98e2f227 100644 --- a/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html +++ b/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html @@ -172,6 +172,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. @@ -275,6 +276,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. @@ -469,6 +471,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. @@ -621,6 +624,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. @@ -713,6 +717,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. diff --git a/docs/dyn/alloydb_v1beta.projects.locations.backups.html b/docs/dyn/alloydb_v1beta.projects.locations.backups.html index e682eaacdb2..91cef5ebbd0 100644 --- a/docs/dyn/alloydb_v1beta.projects.locations.backups.html +++ b/docs/dyn/alloydb_v1beta.projects.locations.backups.html @@ -141,6 +141,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. @@ -263,6 +264,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. @@ -322,6 +324,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. @@ -390,6 +393,7 @@

Method Details

}, "name": "A String", # Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backup_id} where the cluster and backup ID segments should satisfy the regex expression `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`, e.g. 1-63 characters of lowercase letters, numbers, and dashes, starting with a letter, and ending with a letter or number. For more details see https://google.aip.dev/122. The prefix of the backup resource name is the name of the parent resource: * projects/{project}/locations/{region} "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "sizeBytes": "A String", # Output only. The size of the backup in bytes. "state": "A String", # Output only. The current state of the backup. "type": "A String", # The backup type, which suggests the trigger for the backup. diff --git a/docs/dyn/alloydb_v1beta.projects.locations.clusters.html b/docs/dyn/alloydb_v1beta.projects.locations.clusters.html index bcf65ba84d9..651b393d49c 100644 --- a/docs/dyn/alloydb_v1beta.projects.locations.clusters.html +++ b/docs/dyn/alloydb_v1beta.projects.locations.clusters.html @@ -229,6 +229,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -379,6 +380,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -612,6 +614,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -742,6 +745,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -881,6 +885,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, @@ -1081,6 +1086,7 @@

Method Details

], }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "secondaryConfig": { # Configuration information for the secondary cluster. This should be set if and only if the cluster is of type SECONDARY. # Cross Region replication config specific to SECONDARY cluster. "primaryClusterName": "A String", # The name of the primary cluster name with the format: * projects/{project}/locations/{region}/clusters/{cluster_id} }, diff --git a/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html b/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html index 05ed8149411..b8258a94438 100644 --- a/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html +++ b/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html @@ -172,6 +172,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updatePolicy": { # Policy to be used while updating the instance. # Update policy that will be applied during instance update. This field is not persisted when you update the instance. To use a non-default update policy, you must specify explicitly specify the value in each update request. @@ -274,6 +275,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updatePolicy": { # Policy to be used while updating the instance. # Update policy that will be applied during instance update. This field is not persisted when you update the instance. To use a non-default update policy, you must specify explicitly specify the value in each update request. @@ -467,6 +469,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updatePolicy": { # Policy to be used while updating the instance. # Update policy that will be applied during instance update. This field is not persisted when you update the instance. To use a non-default update policy, you must specify explicitly specify the value in each update request. @@ -618,6 +621,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updatePolicy": { # Policy to be used while updating the instance. # Update policy that will be applied during instance update. This field is not persisted when you update the instance. To use a non-default update policy, you must specify explicitly specify the value in each update request. @@ -709,6 +713,7 @@

Method Details

"nodeCount": 42, # Read capacity, i.e. number of nodes in a read pool instance. }, "reconciling": True or False, # Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current serving state of the instance. "uid": "A String", # Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. "updatePolicy": { # Policy to be used while updating the instance. # Update policy that will be applied during instance update. This field is not persisted when you update the instance. To use a non-default update policy, you must specify explicitly specify the value in each update request. diff --git a/docs/dyn/androidpublisher_v3.inappproducts.html b/docs/dyn/androidpublisher_v3.inappproducts.html index e93579025b0..64e78c0b68d 100644 --- a/docs/dyn/androidpublisher_v3.inappproducts.html +++ b/docs/dyn/androidpublisher_v3.inappproducts.html @@ -74,11 +74,20 @@

Google Play Android Developer API . inappproducts

Instance Methods

+

+ batchDelete(packageName, body=None, x__xgafv=None)

+

Deletes in-app products (managed products or subscriptions). Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput. This method should not be used to delete subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.

+

+ batchGet(packageName, sku=None, x__xgafv=None)

+

Reads multiple in-app products, which can be managed products or subscriptions. This method should not be used to retrieve subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.

+

+ batchUpdate(packageName, body=None, x__xgafv=None)

+

Updates or inserts one or more in-app products (managed products or subscriptions). Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput. This method should no longer be used to update subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.

close()

Close httplib2 connections.

- delete(packageName, sku, x__xgafv=None)

+ delete(packageName, sku, latencyTolerance=None, x__xgafv=None)

Deletes an in-app product (a managed product or a subscription). This method should no longer be used to delete subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.

get(packageName, sku, x__xgafv=None)

@@ -90,24 +99,264 @@

Instance Methods

list(packageName, maxResults=None, startIndex=None, token=None, x__xgafv=None)

Lists all in-app products - both managed products and subscriptions. If an app has a large number of in-app products, the response may be paginated. In this case the response field `tokenPagination.nextPageToken` will be set and the caller should provide its value as a `token` request parameter to retrieve the next page. This method should no longer be used to retrieve subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.

- patch(packageName, sku, autoConvertMissingPrices=None, body=None, x__xgafv=None)

+ patch(packageName, sku, autoConvertMissingPrices=None, body=None, latencyTolerance=None, x__xgafv=None)

Patches an in-app product (a managed product or a subscription). This method should no longer be used to update subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.

- update(packageName, sku, allowMissing=None, autoConvertMissingPrices=None, body=None, x__xgafv=None)

+ update(packageName, sku, allowMissing=None, autoConvertMissingPrices=None, body=None, latencyTolerance=None, x__xgafv=None)

Updates an in-app product (a managed product or a subscription). This method should no longer be used to update subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.

Method Details

+
+ batchDelete(packageName, body=None, x__xgafv=None) +
Deletes in-app products (managed products or subscriptions). Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput. This method should not be used to delete subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.
+
+Args:
+  packageName: string, Package name of the app. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request to delete multiple in-app products.
+  "requests": [ # Individual delete requests. At least one request is required. Can contain up to 100 requests. All requests must correspond to different in-app products.
+    { # Request to delete an in-app product.
+      "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+      "packageName": "A String", # Package name of the app.
+      "sku": "A String", # Unique identifier for the in-app product.
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+
+ +
+ batchGet(packageName, sku=None, x__xgafv=None) +
Reads multiple in-app products, which can be managed products or subscriptions. This method should not be used to retrieve subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.
+
+Args:
+  packageName: string, Package name of the app. (required)
+  sku: string, Unique identifier for the in-app products. (repeated)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchGetSubscriptions endpoint.
+  "inappproduct": [ # The list of requested in-app products, in the same order as the request.
+    { # An in-app product. The resource for InappproductsService.
+      "defaultLanguage": "A String", # Default language of the localized data, as defined by BCP-47. e.g. "en-US".
+      "defaultPrice": { # Definition of a price, i.e. currency and units. # Default price. Cannot be zero, as in-app products are never free. Always in the developer's Checkout merchant currency.
+        "currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java
+        "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string.
+      },
+      "gracePeriod": "A String", # Grace period of the subscription, specified in ISO 8601 format. Allows developers to give their subscribers a grace period when the payment for the new recurrence period is declined. Acceptable values are P0D (zero days), P3D (three days), P7D (seven days), P14D (14 days), and P30D (30 days).
+      "listings": { # List of localized title and description data. Map key is the language of the localized data, as defined by BCP-47, e.g. "en-US".
+        "a_key": { # Store listing of a single in-app product.
+          "benefits": [ # Localized entitlement benefits for a subscription.
+            "A String",
+          ],
+          "description": "A String", # Description for the store listing.
+          "title": "A String", # Title for the store listing.
+        },
+      },
+      "managedProductTaxesAndComplianceSettings": { # Details about taxation and legal compliance for managed products. # Details about taxes and legal compliance. Only applicable to managed products.
+        "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+        "isTokenizedDigitalAsset": True or False, # Whether this in-app product is declared as a product representing a tokenized digital asset.
+        "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+          "a_key": { # Specified details about taxation in a given geographical region.
+            "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+            "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+            "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+          },
+        },
+      },
+      "packageName": "A String", # Package name of the parent app.
+      "prices": { # Prices per buyer region. None of these can be zero, as in-app products are never free. Map key is region code, as defined by ISO 3166-2.
+        "a_key": { # Definition of a price, i.e. currency and units.
+          "currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java
+          "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string.
+        },
+      },
+      "purchaseType": "A String", # The type of the product, e.g. a recurring subscription.
+      "sku": "A String", # Stock-keeping-unit (SKU) of the product, unique within an app.
+      "status": "A String", # The status of the product, e.g. whether it's active.
+      "subscriptionPeriod": "A String", # Subscription period, specified in ISO 8601 format. Acceptable values are P1W (one week), P1M (one month), P3M (three months), P6M (six months), and P1Y (one year).
+      "subscriptionTaxesAndComplianceSettings": { # Details about taxation, Google Play policy and legal compliance for subscription products. # Details about taxes and legal compliance. Only applicable to subscription products.
+        "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+        "isTokenizedDigitalAsset": True or False, # Whether this subscription is declared as a product representing a tokenized digital asset.
+        "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+          "a_key": { # Specified details about taxation in a given geographical region.
+            "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+            "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+            "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+          },
+        },
+      },
+      "trialPeriod": "A String", # Trial period, specified in ISO 8601 format. Acceptable values are anything between P7D (seven days) and P999D (999 days).
+    },
+  ],
+}
+
+ +
+ batchUpdate(packageName, body=None, x__xgafv=None) +
Updates or inserts one or more in-app products (managed products or subscriptions). Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput. This method should no longer be used to update subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.
+
+Args:
+  packageName: string, Package name of the app. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request to update or insert one or more in-app products.
+  "requests": [ # Required. Individual update requests. At least one request is required. Can contain up to 100 requests. All requests must correspond to different in-app products.
+    { # Request to update an in-app product.
+      "allowMissing": True or False, # If set to true, and the in-app product with the given package_name and sku doesn't exist, the in-app product will be created.
+      "autoConvertMissingPrices": True or False, # If true the prices for all regions targeted by the parent app that don't have a price specified for this in-app product will be auto converted to the target currency based on the default price. Defaults to false.
+      "inappproduct": { # An in-app product. The resource for InappproductsService. # The new in-app product.
+        "defaultLanguage": "A String", # Default language of the localized data, as defined by BCP-47. e.g. "en-US".
+        "defaultPrice": { # Definition of a price, i.e. currency and units. # Default price. Cannot be zero, as in-app products are never free. Always in the developer's Checkout merchant currency.
+          "currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java
+          "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string.
+        },
+        "gracePeriod": "A String", # Grace period of the subscription, specified in ISO 8601 format. Allows developers to give their subscribers a grace period when the payment for the new recurrence period is declined. Acceptable values are P0D (zero days), P3D (three days), P7D (seven days), P14D (14 days), and P30D (30 days).
+        "listings": { # List of localized title and description data. Map key is the language of the localized data, as defined by BCP-47, e.g. "en-US".
+          "a_key": { # Store listing of a single in-app product.
+            "benefits": [ # Localized entitlement benefits for a subscription.
+              "A String",
+            ],
+            "description": "A String", # Description for the store listing.
+            "title": "A String", # Title for the store listing.
+          },
+        },
+        "managedProductTaxesAndComplianceSettings": { # Details about taxation and legal compliance for managed products. # Details about taxes and legal compliance. Only applicable to managed products.
+          "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+          "isTokenizedDigitalAsset": True or False, # Whether this in-app product is declared as a product representing a tokenized digital asset.
+          "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+            "a_key": { # Specified details about taxation in a given geographical region.
+              "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+              "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+              "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+            },
+          },
+        },
+        "packageName": "A String", # Package name of the parent app.
+        "prices": { # Prices per buyer region. None of these can be zero, as in-app products are never free. Map key is region code, as defined by ISO 3166-2.
+          "a_key": { # Definition of a price, i.e. currency and units.
+            "currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java
+            "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string.
+          },
+        },
+        "purchaseType": "A String", # The type of the product, e.g. a recurring subscription.
+        "sku": "A String", # Stock-keeping-unit (SKU) of the product, unique within an app.
+        "status": "A String", # The status of the product, e.g. whether it's active.
+        "subscriptionPeriod": "A String", # Subscription period, specified in ISO 8601 format. Acceptable values are P1W (one week), P1M (one month), P3M (three months), P6M (six months), and P1Y (one year).
+        "subscriptionTaxesAndComplianceSettings": { # Details about taxation, Google Play policy and legal compliance for subscription products. # Details about taxes and legal compliance. Only applicable to subscription products.
+          "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+          "isTokenizedDigitalAsset": True or False, # Whether this subscription is declared as a product representing a tokenized digital asset.
+          "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+            "a_key": { # Specified details about taxation in a given geographical region.
+              "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+              "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+              "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+            },
+          },
+        },
+        "trialPeriod": "A String", # Trial period, specified in ISO 8601 format. Acceptable values are anything between P7D (seven days) and P999D (999 days).
+      },
+      "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+      "packageName": "A String", # Package name of the app.
+      "sku": "A String", # Unique identifier for the in-app product.
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for a batch in-app product update.
+  "inappproducts": [ # The updated or inserted in-app products.
+    { # An in-app product. The resource for InappproductsService.
+      "defaultLanguage": "A String", # Default language of the localized data, as defined by BCP-47. e.g. "en-US".
+      "defaultPrice": { # Definition of a price, i.e. currency and units. # Default price. Cannot be zero, as in-app products are never free. Always in the developer's Checkout merchant currency.
+        "currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java
+        "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string.
+      },
+      "gracePeriod": "A String", # Grace period of the subscription, specified in ISO 8601 format. Allows developers to give their subscribers a grace period when the payment for the new recurrence period is declined. Acceptable values are P0D (zero days), P3D (three days), P7D (seven days), P14D (14 days), and P30D (30 days).
+      "listings": { # List of localized title and description data. Map key is the language of the localized data, as defined by BCP-47, e.g. "en-US".
+        "a_key": { # Store listing of a single in-app product.
+          "benefits": [ # Localized entitlement benefits for a subscription.
+            "A String",
+          ],
+          "description": "A String", # Description for the store listing.
+          "title": "A String", # Title for the store listing.
+        },
+      },
+      "managedProductTaxesAndComplianceSettings": { # Details about taxation and legal compliance for managed products. # Details about taxes and legal compliance. Only applicable to managed products.
+        "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+        "isTokenizedDigitalAsset": True or False, # Whether this in-app product is declared as a product representing a tokenized digital asset.
+        "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+          "a_key": { # Specified details about taxation in a given geographical region.
+            "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+            "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+            "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+          },
+        },
+      },
+      "packageName": "A String", # Package name of the parent app.
+      "prices": { # Prices per buyer region. None of these can be zero, as in-app products are never free. Map key is region code, as defined by ISO 3166-2.
+        "a_key": { # Definition of a price, i.e. currency and units.
+          "currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java
+          "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string.
+        },
+      },
+      "purchaseType": "A String", # The type of the product, e.g. a recurring subscription.
+      "sku": "A String", # Stock-keeping-unit (SKU) of the product, unique within an app.
+      "status": "A String", # The status of the product, e.g. whether it's active.
+      "subscriptionPeriod": "A String", # Subscription period, specified in ISO 8601 format. Acceptable values are P1W (one week), P1M (one month), P3M (three months), P6M (six months), and P1Y (one year).
+      "subscriptionTaxesAndComplianceSettings": { # Details about taxation, Google Play policy and legal compliance for subscription products. # Details about taxes and legal compliance. Only applicable to subscription products.
+        "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+        "isTokenizedDigitalAsset": True or False, # Whether this subscription is declared as a product representing a tokenized digital asset.
+        "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+          "a_key": { # Specified details about taxation in a given geographical region.
+            "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+            "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+            "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+          },
+        },
+      },
+      "trialPeriod": "A String", # Trial period, specified in ISO 8601 format. Acceptable values are anything between P7D (seven days) and P999D (999 days).
+    },
+  ],
+}
+
+
close()
Close httplib2 connections.
- delete(packageName, sku, x__xgafv=None) + delete(packageName, sku, latencyTolerance=None, x__xgafv=None)
Deletes an in-app product (a managed product or a subscription). This method should no longer be used to delete subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.
 
 Args:
   packageName: string, Package name of the app. (required)
   sku: string, Unique identifier for the in-app product. (required)
+  latencyTolerance: string, Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+    Allowed values
+      PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED - Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.
+      PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE - The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.
+      PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT - The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -391,7 +640,7 @@ 

Method Details

- patch(packageName, sku, autoConvertMissingPrices=None, body=None, x__xgafv=None) + patch(packageName, sku, autoConvertMissingPrices=None, body=None, latencyTolerance=None, x__xgafv=None)
Patches an in-app product (a managed product or a subscription). This method should no longer be used to update subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.
 
 Args:
@@ -453,6 +702,11 @@ 

Method Details

} autoConvertMissingPrices: boolean, If true the prices for all regions targeted by the parent app that don't have a price specified for this in-app product will be auto converted to the target currency based on the default price. Defaults to false. + latencyTolerance: string, Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + Allowed values + PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED - Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE - The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT - The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -515,7 +769,7 @@

Method Details

- update(packageName, sku, allowMissing=None, autoConvertMissingPrices=None, body=None, x__xgafv=None) + update(packageName, sku, allowMissing=None, autoConvertMissingPrices=None, body=None, latencyTolerance=None, x__xgafv=None)
Updates an in-app product (a managed product or a subscription). This method should no longer be used to update subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.
 
 Args:
@@ -578,6 +832,11 @@ 

Method Details

allowMissing: boolean, If set to true, and the in-app product with the given package_name and sku doesn't exist, the in-app product will be created. autoConvertMissingPrices: boolean, If true the prices for all regions targeted by the parent app that don't have a price specified for this in-app product will be auto converted to the target currency based on the default price. Defaults to false. + latencyTolerance: string, Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + Allowed values + PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED - Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE - The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT - The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.html b/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.html index 74d648ce338..99a16a39307 100644 --- a/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.html +++ b/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.html @@ -82,6 +82,12 @@

Instance Methods

activate(packageName, productId, basePlanId, body=None, x__xgafv=None)

Activates a base plan. Once activated, base plans will be available to new subscribers.

+

+ batchMigratePrices(packageName, productId, body=None, x__xgafv=None)

+

Batch variant of the MigrateBasePlanPrices endpoint. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.

+

+ batchUpdateStates(packageName, productId, body=None, x__xgafv=None)

+

Activates or deactivates base plans across one or multiple subscriptions. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.

close()

Close httplib2 connections.

@@ -107,6 +113,10 @@

Method Details

The object takes the form of: { # Request message for ActivateBasePlan. + "basePlanId": "A String", # Required. The unique base plan ID of the base plan to activate. + "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + "packageName": "A String", # Required. The parent app (package name) of the base plan to activate. + "productId": "A String", # Required. The parent subscription (ID) of the base plan to activate. } x__xgafv: string, V1 error format. @@ -192,6 +202,169 @@

Method Details

}
+
+ batchMigratePrices(packageName, productId, body=None, x__xgafv=None) +
Batch variant of the MigrateBasePlanPrices endpoint. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.
+
+Args:
+  packageName: string, Required. The parent app (package name) for which the subscriptions should be created or updated. Must be equal to the package_name field on all the Subscription resources. (required)
+  productId: string, Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this batch update spans multiple subscriptions, set this field to "-". Must be set. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for BatchMigrateBasePlanPrices.
+  "requests": [ # Required. Up to 100 price migration requests. All requests must update different base plans.
+    { # Request message for MigrateBasePlanPrices.
+      "basePlanId": "A String", # Required. The unique base plan ID of the base plan to update prices on.
+      "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+      "packageName": "A String", # Required. Package name of the parent app. Must be equal to the package_name field on the Subscription resource.
+      "productId": "A String", # Required. The ID of the subscription to update. Must be equal to the product_id field on the Subscription resource.
+      "regionalPriceMigrations": [ # Required. The regional prices to update.
+        { # Configuration for a price migration.
+          "oldestAllowedPriceVersionTime": "A String", # Required. The cutoff time for historical prices that subscribers can remain paying. Subscribers on prices which were available at this cutoff time or later will stay on their existing price. Subscribers on older prices will be migrated to the currently-offered price. The migrated subscribers will receive a notification that they will be paying a different price. Subscribers who do not agree to the new price will have their subscription ended at the next renewal.
+          "priceIncreaseType": "A String", # Optional. The behavior the caller wants users to see when there is a price increase during migration. If left unset, the behavior defaults to PRICE_INCREASE_TYPE_OPT_IN. Note that the first opt-out price increase migration for each app must be initiated in Play Console.
+          "regionCode": "A String", # Required. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+        },
+      ],
+      "regionsVersion": { # The version of the available regions being used for the specified resource. # Required. The version of the available regions being used for the regional_price_migrations.
+        "version": "A String", # Required. A string representing the version of available regions being used for the specified resource. Regional prices for the resource have to be specified according to the information published in [this article](https://support.google.com/googleplay/android-developer/answer/10532353). Each time the supported locations substantially change, the version will be incremented. Using this field will ensure that creating and updating the resource with an older region's version and set of regional prices and currencies will succeed even though a new version is available. The latest version is 2022/02.
+      },
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchMigrateBasePlanPrices.
+  "responses": [ # Contains one response per requested price migration, in the same order as the request.
+    { # Response message for MigrateBasePlanPrices.
+    },
+  ],
+}
+
+ +
+ batchUpdateStates(packageName, productId, body=None, x__xgafv=None) +
Activates or deactivates base plans across one or multiple subscriptions. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.
+
+Args:
+  packageName: string, Required. The parent app (package name) of the updated base plans. (required)
+  productId: string, Required. The product ID of the parent subscription, if all updated base plans belong to the same subscription. If this batch update spans multiple subscriptions, set this field to "-". Must be set. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for BatchUpdateBasePlanStates.
+  "requests": [ # Required. The update request list of up to 100 elements. All requests must update different base plans.
+    { # Request message to update the state of a subscription base plan.
+      "activateBasePlanRequest": { # Request message for ActivateBasePlan. # Activates a base plan. Once activated, base plans will be available to new subscribers.
+        "basePlanId": "A String", # Required. The unique base plan ID of the base plan to activate.
+        "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+        "packageName": "A String", # Required. The parent app (package name) of the base plan to activate.
+        "productId": "A String", # Required. The parent subscription (ID) of the base plan to activate.
+      },
+      "deactivateBasePlanRequest": { # Request message for DeactivateBasePlan. # Deactivates a base plan. Once deactivated, the base plan will become unavailable to new subscribers, but existing subscribers will maintain their subscription.
+        "basePlanId": "A String", # Required. The unique base plan ID of the base plan to deactivate.
+        "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+        "packageName": "A String", # Required. The parent app (package name) of the base plan to deactivate.
+        "productId": "A String", # Required. The parent subscription (ID) of the base plan to deactivate.
+      },
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchUpdateBasePlanStates.
+  "subscriptions": [ # The list of updated subscriptions. This list will match the requests one to one, in the same order.
+    { # A single subscription for an app.
+      "archived": True or False, # Output only. Whether this subscription is archived. Archived subscriptions are not available to any subscriber any longer, cannot be updated, and are not returned in list requests unless the show archived flag is passed in.
+      "basePlans": [ # The set of base plans for this subscription. Represents the prices and duration of the subscription if no other offers apply.
+        { # A single base plan for a subscription.
+          "autoRenewingBasePlanType": { # Represents a base plan that automatically renews at the end of its subscription period. # Set when the base plan automatically renews at a regular interval.
+            "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+            "gracePeriodDuration": "A String", # Grace period of the subscription, specified in ISO 8601 format. Acceptable values are P0D (zero days), P3D (3 days), P7D (7 days), P14D (14 days), and P30D (30 days). If not specified, a default value will be used based on the recurring period duration.
+            "legacyCompatible": True or False, # Whether the renewing base plan is backward compatible. The backward compatible base plan is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one renewing base plan can be marked as legacy compatible for a given subscription.
+            "legacyCompatibleSubscriptionOfferId": "A String", # Subscription offer id which is legacy compatible. The backward compatible subscription offer is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one subscription offer can be marked as legacy compatible for a given renewing base plan. To have no Subscription offer as legacy compatible set this field as empty string.
+            "prorationMode": "A String", # The proration mode for the base plan determines what happens when a user switches to this plan from another base plan. If unspecified, defaults to CHARGE_ON_NEXT_BILLING_DATE.
+            "resubscribeState": "A String", # Whether users should be able to resubscribe to this base plan in Google Play surfaces. Defaults to RESUBSCRIBE_STATE_ACTIVE if not specified.
+          },
+          "basePlanId": "A String", # Required. Immutable. The unique identifier of this base plan. Must be unique within the subscription, and conform with RFC-1034. That is, this ID can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 63 characters.
+          "offerTags": [ # List of up to 20 custom tags specified for this base plan, and returned to the app through the billing library. Subscription offers for this base plan will also receive these offer tags in the billing library.
+            { # Represents a custom tag specified for base plans and subscription offers.
+              "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+            },
+          ],
+          "otherRegionsConfig": { # Pricing information for any new locations Play may launch in. # Pricing information for any new locations Play may launch in the future. If omitted, the BasePlan will not be automatically available in any new locations Play may launch in the future.
+            "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+            "newSubscriberAvailability": True or False, # Whether the base plan is available for new subscribers in any new locations Play may launch in. If not specified, this will default to false.
+            "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+          },
+          "prepaidBasePlanType": { # Represents a base plan that does not automatically renew at the end of the base plan, and must be manually renewed by the user. # Set when the base plan does not automatically renew at the end of the billing period.
+            "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+            "timeExtension": "A String", # Whether users should be able to extend this prepaid base plan in Google Play surfaces. Defaults to TIME_EXTENSION_ACTIVE if not specified.
+          },
+          "regionalConfigs": [ # Region-specific information for this base plan.
+            { # Configuration for a base plan specific to a region.
+              "newSubscriberAvailability": True or False, # Whether the base plan in the specified region is available for new subscribers. Existing subscribers will not have their subscription canceled if this value is set to false. If not specified, this will default to false.
+              "price": { # Represents an amount of money with its currency type. # The price of the base plan in the specified region. Must be set if the base plan is available to new subscribers. Must be set in the currency that is linked to the specified region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "regionCode": "A String", # Required. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+            },
+          ],
+          "state": "A String", # Output only. The state of the base plan, i.e. whether it's active. Draft and inactive base plans can be activated or deleted. Active base plans can be made inactive. Inactive base plans can be canceled. This field cannot be changed by updating the resource. Use the dedicated endpoints instead.
+        },
+      ],
+      "listings": [ # Required. List of localized listings for this subscription. Must contain at least an entry for the default language of the parent app.
+        { # The consumer-visible metadata of a subscription.
+          "benefits": [ # A list of benefits shown to the user on platforms such as the Play Store and in restoration flows in the language of this listing. Plain text. Ordered list of at most four benefits.
+            "A String",
+          ],
+          "description": "A String", # The description of this subscription in the language of this listing. Maximum length - 80 characters. Plain text.
+          "languageCode": "A String", # Required. The language of this listing, as defined by BCP-47, e.g. "en-US".
+          "title": "A String", # Required. The title of this subscription in the language of this listing. Plain text.
+        },
+      ],
+      "packageName": "A String", # Immutable. Package name of the parent app.
+      "productId": "A String", # Immutable. Unique product ID of the product. Unique within the parent app. Product IDs must be composed of lower-case letters (a-z), numbers (0-9), underscores (_) and dots (.). It must start with a lower-case letter or number, and be between 1 and 40 (inclusive) characters in length.
+      "taxAndComplianceSettings": { # Details about taxation, Google Play policy and legal compliance for subscription products. # Details about taxes and legal compliance.
+        "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+        "isTokenizedDigitalAsset": True or False, # Whether this subscription is declared as a product representing a tokenized digital asset.
+        "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+          "a_key": { # Specified details about taxation in a given geographical region.
+            "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+            "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+            "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+          },
+        },
+      },
+    },
+  ],
+}
+
+
close()
Close httplib2 connections.
@@ -209,6 +382,10 @@

Method Details

The object takes the form of: { # Request message for DeactivateBasePlan. + "basePlanId": "A String", # Required. The unique base plan ID of the base plan to deactivate. + "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + "packageName": "A String", # Required. The parent app (package name) of the base plan to deactivate. + "productId": "A String", # Required. The parent subscription (ID) of the base plan to deactivate. } x__xgafv: string, V1 error format. @@ -321,6 +498,10 @@

Method Details

The object takes the form of: { # Request message for MigrateBasePlanPrices. + "basePlanId": "A String", # Required. The unique base plan ID of the base plan to update prices on. + "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + "packageName": "A String", # Required. Package name of the parent app. Must be equal to the package_name field on the Subscription resource. + "productId": "A String", # Required. The ID of the subscription to update. Must be equal to the product_id field on the Subscription resource. "regionalPriceMigrations": [ # Required. The regional prices to update. { # Configuration for a price migration. "oldestAllowedPriceVersionTime": "A String", # Required. The cutoff time for historical prices that subscribers can remain paying. Subscribers on prices which were available at this cutoff time or later will stay on their existing price. Subscribers on older prices will be migrated to the currently-offered price. The migrated subscribers will receive a notification that they will be paying a different price. Subscribers who do not agree to the new price will have their subscription ended at the next renewal. diff --git a/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.offers.html b/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.offers.html index cb24c57d7fa..cfe724b4a78 100644 --- a/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.offers.html +++ b/docs/dyn/androidpublisher_v3.monetization.subscriptions.basePlans.offers.html @@ -77,6 +77,15 @@

Instance Methods

activate(packageName, productId, basePlanId, offerId, body=None, x__xgafv=None)

Activates a subscription offer. Once activated, subscription offers will be available to new subscribers.

+

+ batchGet(packageName, productId, basePlanId, body=None, x__xgafv=None)

+

Reads one or more subscription offers.

+

+ batchUpdate(packageName, productId, basePlanId, body=None, x__xgafv=None)

+

Updates a batch of subscription offers. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.

+

+ batchUpdateStates(packageName, productId, basePlanId, body=None, x__xgafv=None)

+

Updates a batch of subscription offer states. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.

close()

Close httplib2 connections.

@@ -99,7 +108,7 @@

Instance Methods

list_next()

Retrieves the next page of results.

- patch(packageName, productId, basePlanId, offerId, body=None, regionsVersion_version=None, updateMask=None, x__xgafv=None)

+ patch(packageName, productId, basePlanId, offerId, allowMissing=None, body=None, latencyTolerance=None, regionsVersion_version=None, updateMask=None, x__xgafv=None)

Updates an existing subscription offer.

Method Details

@@ -115,6 +124,11 @@

Method Details

The object takes the form of: { # Request message for ActivateSubscriptionOffer. + "basePlanId": "A String", # Required. The parent base plan (ID) of the offer to activate. + "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + "offerId": "A String", # Required. The unique offer ID of the offer to activate. + "packageName": "A String", # Required. The parent app (package name) of the offer to activate. + "productId": "A String", # Required. The parent subscription (ID) of the offer to activate. } x__xgafv: string, V1 error format. @@ -211,6 +225,462 @@

Method Details

}
+
+ batchGet(packageName, productId, basePlanId, body=None, x__xgafv=None) +
Reads one or more subscription offers.
+
+Args:
+  packageName: string, Required. The parent app (package name) for which the subscriptions should be created or updated. Must be equal to the package_name field on all the requests. (required)
+  productId: string, Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this request spans multiple subscriptions, set this field to "-". Must be set. (required)
+  basePlanId: string, Required. The parent base plan (ID) for which the offers should be read. May be specified as '-' to read offers from multiple base plans. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for BatchGetSubscriptionOffers endpoint.
+  "requests": [ # Required. A list of get requests of up to 100 elements. All requests must retrieve different subscription offers.
+    { # Request message for GetSubscriptionOffer.
+      "basePlanId": "A String", # Required. The parent base plan (ID) of the offer to get.
+      "offerId": "A String", # Required. The unique offer ID of the offer to get.
+      "packageName": "A String", # Required. The parent app (package name) of the offer to get.
+      "productId": "A String", # Required. The parent subscription (ID) of the offer to get.
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchGetSubscriptionOffers endpoint.
+  "subscriptionOffers": [
+    { # A single, temporary offer
+      "basePlanId": "A String", # Required. Immutable. The ID of the base plan to which this offer is an extension.
+      "offerId": "A String", # Required. Immutable. Unique ID of this subscription offer. Must be unique within the base plan.
+      "offerTags": [ # List of up to 20 custom tags specified for this offer, and returned to the app through the billing library.
+        { # Represents a custom tag specified for base plans and subscription offers.
+          "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+        },
+      ],
+      "otherRegionsConfig": { # Configuration for any new locations Play may launch in specified on a subscription offer. # The configuration for any new locations Play may launch in the future.
+        "otherRegionsNewSubscriberAvailability": True or False, # Whether the subscription offer will be available in any new locations Play may launch in the future. If not specified, this will default to false.
+      },
+      "packageName": "A String", # Required. Immutable. The package name of the app the parent subscription belongs to.
+      "phases": [ # Required. The phases of this subscription offer. Must contain at least one entry, and may contain at most five. Users will always receive all these phases in the specified order. Phases may not be added, removed, or reordered after initial creation.
+        { # A single phase of a subscription offer.
+          "duration": "A String", # Required. The duration of a single recurrence of this phase. Specified in ISO 8601 format.
+          "otherRegionsConfig": { # Configuration for any new locations Play may launch in for a single offer phase. # Pricing information for any new locations Play may launch in.
+            "absoluteDiscounts": { # Pricing information for any new locations Play may launch in. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for any new locations Play may launch in.
+              "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+            },
+            "otherRegionsPrices": { # Pricing information for any new locations Play may launch in. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for any new locations Play may launch in.
+              "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+            },
+            "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in any new locations Play may launch in.
+          },
+          "recurrenceCount": 42, # Required. The number of times this phase repeats. If this offer phase is not free, each recurrence charges the user the price of this offer phase.
+          "regionalConfigs": [ # Required. The region-specific configuration of this offer phase. This list must contain exactly one entry for each region for which the subscription offer has a regional config.
+            { # Configuration for a single phase of a subscription offer in a single region.
+              "absoluteDiscount": { # Represents an amount of money with its currency type. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for this region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "price": { # Represents an amount of money with its currency type. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for this region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "regionCode": "A String", # Required. Immutable. The region to which this config applies.
+              "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in this region.
+            },
+          ],
+        },
+      ],
+      "productId": "A String", # Required. Immutable. The ID of the parent subscription this offer belongs to.
+      "regionalConfigs": [ # Required. The region-specific configuration of this offer. Must contain at least one entry.
+        { # Configuration for a subscription offer in a single region.
+          "newSubscriberAvailability": True or False, # Whether the subscription offer in the specified region is available for new subscribers. Existing subscribers will not have their subscription cancelled if this value is set to false. If not specified, this will default to false.
+          "regionCode": "A String", # Required. Immutable. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+        },
+      ],
+      "state": "A String", # Output only. The current state of this offer. Can be changed using Activate and Deactivate actions. NB: the base plan state supersedes this state, so an active offer may not be available if the base plan is not active.
+      "targeting": { # Defines the rule a user needs to satisfy to receive this offer. # The requirements that users need to fulfil to be eligible for this offer. Represents the requirements that Play will evaluate to decide whether an offer should be returned. Developers may further filter these offers themselves.
+        "acquisitionRule": { # Represents a targeting rule of the form: User never had {scope} before. # Offer targeting rule for new user acquisition.
+          "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "any subscription in app".
+            "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+          },
+        },
+        "upgradeRule": { # Represents a targeting rule of the form: User currently has {scope} [with billing period {billing_period}]. # Offer targeting rule for upgrading users' existing plans.
+          "billingPeriodDuration": "A String", # The specific billing period duration, specified in ISO 8601 format, that a user must be currently subscribed to to be eligible for this rule. If not specified, users subscribed to any billing period are matched.
+          "oncePerUser": True or False, # Limit this offer to only once per user. If set to true, a user can never be eligible for this offer again if they ever subscribed to this offer.
+          "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "specific subscription in app".
+            "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+          },
+        },
+      },
+    },
+  ],
+}
+
+ +
+ batchUpdate(packageName, productId, basePlanId, body=None, x__xgafv=None) +
Updates a batch of subscription offers. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.
+
+Args:
+  packageName: string, Required. The parent app (package name) of the updated subscription offers. Must be equal to the package_name field on all the updated SubscriptionOffer resources. (required)
+  productId: string, Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this request spans multiple subscriptions, set this field to "-". Must be set. (required)
+  basePlanId: string, Required. The parent base plan (ID) for which the offers should be updated. May be specified as '-' to update offers from multiple base plans. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for BatchUpdateSubscriptionOffers.
+  "requests": [ # Required. A list of update requests of up to 100 elements. All requests must update different subscription offers.
+    { # Request message for UpdateSubscriptionOffer.
+      "allowMissing": True or False, # Optional. If set to true, and the subscription offer with the given package_name, product_id, base_plan_id and offer_id doesn't exist, an offer will be created. If a new offer is created, update_mask is ignored.
+      "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+      "regionsVersion": { # The version of the available regions being used for the specified resource. # Required. The version of the available regions being used for the subscription_offer.
+        "version": "A String", # Required. A string representing the version of available regions being used for the specified resource. Regional prices for the resource have to be specified according to the information published in [this article](https://support.google.com/googleplay/android-developer/answer/10532353). Each time the supported locations substantially change, the version will be incremented. Using this field will ensure that creating and updating the resource with an older region's version and set of regional prices and currencies will succeed even though a new version is available. The latest version is 2022/02.
+      },
+      "subscriptionOffer": { # A single, temporary offer # Required. The subscription offer to update.
+        "basePlanId": "A String", # Required. Immutable. The ID of the base plan to which this offer is an extension.
+        "offerId": "A String", # Required. Immutable. Unique ID of this subscription offer. Must be unique within the base plan.
+        "offerTags": [ # List of up to 20 custom tags specified for this offer, and returned to the app through the billing library.
+          { # Represents a custom tag specified for base plans and subscription offers.
+            "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+          },
+        ],
+        "otherRegionsConfig": { # Configuration for any new locations Play may launch in specified on a subscription offer. # The configuration for any new locations Play may launch in the future.
+          "otherRegionsNewSubscriberAvailability": True or False, # Whether the subscription offer will be available in any new locations Play may launch in the future. If not specified, this will default to false.
+        },
+        "packageName": "A String", # Required. Immutable. The package name of the app the parent subscription belongs to.
+        "phases": [ # Required. The phases of this subscription offer. Must contain at least one entry, and may contain at most five. Users will always receive all these phases in the specified order. Phases may not be added, removed, or reordered after initial creation.
+          { # A single phase of a subscription offer.
+            "duration": "A String", # Required. The duration of a single recurrence of this phase. Specified in ISO 8601 format.
+            "otherRegionsConfig": { # Configuration for any new locations Play may launch in for a single offer phase. # Pricing information for any new locations Play may launch in.
+              "absoluteDiscounts": { # Pricing information for any new locations Play may launch in. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for any new locations Play may launch in.
+                "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                  "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                  "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                  "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+                },
+                "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                  "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                  "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                  "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+                },
+              },
+              "otherRegionsPrices": { # Pricing information for any new locations Play may launch in. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for any new locations Play may launch in.
+                "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                  "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                  "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                  "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+                },
+                "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                  "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                  "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                  "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+                },
+              },
+              "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in any new locations Play may launch in.
+            },
+            "recurrenceCount": 42, # Required. The number of times this phase repeats. If this offer phase is not free, each recurrence charges the user the price of this offer phase.
+            "regionalConfigs": [ # Required. The region-specific configuration of this offer phase. This list must contain exactly one entry for each region for which the subscription offer has a regional config.
+              { # Configuration for a single phase of a subscription offer in a single region.
+                "absoluteDiscount": { # Represents an amount of money with its currency type. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for this region.
+                  "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                  "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                  "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+                },
+                "price": { # Represents an amount of money with its currency type. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for this region.
+                  "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                  "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                  "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+                },
+                "regionCode": "A String", # Required. Immutable. The region to which this config applies.
+                "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in this region.
+              },
+            ],
+          },
+        ],
+        "productId": "A String", # Required. Immutable. The ID of the parent subscription this offer belongs to.
+        "regionalConfigs": [ # Required. The region-specific configuration of this offer. Must contain at least one entry.
+          { # Configuration for a subscription offer in a single region.
+            "newSubscriberAvailability": True or False, # Whether the subscription offer in the specified region is available for new subscribers. Existing subscribers will not have their subscription cancelled if this value is set to false. If not specified, this will default to false.
+            "regionCode": "A String", # Required. Immutable. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+          },
+        ],
+        "state": "A String", # Output only. The current state of this offer. Can be changed using Activate and Deactivate actions. NB: the base plan state supersedes this state, so an active offer may not be available if the base plan is not active.
+        "targeting": { # Defines the rule a user needs to satisfy to receive this offer. # The requirements that users need to fulfil to be eligible for this offer. Represents the requirements that Play will evaluate to decide whether an offer should be returned. Developers may further filter these offers themselves.
+          "acquisitionRule": { # Represents a targeting rule of the form: User never had {scope} before. # Offer targeting rule for new user acquisition.
+            "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "any subscription in app".
+              "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+            },
+          },
+          "upgradeRule": { # Represents a targeting rule of the form: User currently has {scope} [with billing period {billing_period}]. # Offer targeting rule for upgrading users' existing plans.
+            "billingPeriodDuration": "A String", # The specific billing period duration, specified in ISO 8601 format, that a user must be currently subscribed to to be eligible for this rule. If not specified, users subscribed to any billing period are matched.
+            "oncePerUser": True or False, # Limit this offer to only once per user. If set to true, a user can never be eligible for this offer again if they ever subscribed to this offer.
+            "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "specific subscription in app".
+              "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+            },
+          },
+        },
+      },
+      "updateMask": "A String", # Required. The list of fields to be updated.
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchUpdateSubscriptionOffers.
+  "subscriptionOffers": [ # The updated subscription offers list.
+    { # A single, temporary offer.
+      "basePlanId": "A String", # Required. Immutable. The ID of the base plan to which this offer is an extension.
+      "offerId": "A String", # Required. Immutable. Unique ID of this subscription offer. Must be unique within the base plan.
+      "offerTags": [ # List of up to 20 custom tags specified for this offer, and returned to the app through the billing library.
+        { # Represents a custom tag specified for base plans and subscription offers.
+          "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+        },
+      ],
+      "otherRegionsConfig": { # Configuration for any new locations Play may launch in specified on a subscription offer. # The configuration for any new locations Play may launch in the future.
+        "otherRegionsNewSubscriberAvailability": True or False, # Whether the subscription offer in any new locations Play may launch in the future. If not specified, this will default to false.
+      },
+      "packageName": "A String", # Required. Immutable. The package name of the app the parent subscription belongs to.
+      "phases": [ # Required. The phases of this subscription offer. Must contain at least one entry, and may contain at most five. Users will always receive all these phases in the specified order. Phases may not be added, removed, or reordered after initial creation.
+        { # A single phase of a subscription offer.
+          "duration": "A String", # Required. The duration of a single recurrence of this phase. Specified in ISO 8601 format.
+          "otherRegionsConfig": { # Configuration for any new locations Play may launch in for a single offer phase. # Pricing information for any new locations Play may launch in.
+            "absoluteDiscounts": { # Pricing information for any new locations Play may launch in. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for any new locations Play may launch in.
+              "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+            },
+            "otherRegionsPrices": { # Pricing information for any new locations Play may launch in. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for any new locations Play may launch in.
+              "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+            },
+            "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in any new locations Play may launch in.
+          },
+          "recurrenceCount": 42, # Required. The number of times this phase repeats. If this offer phase is not free, each recurrence charges the user the price of this offer phase.
+          "regionalConfigs": [ # Required. The region-specific configuration of this offer phase. This list must contain exactly one entry for each region for which the subscription offer has a regional config.
+            { # Configuration for a single phase of a subscription offer in a single region.
+              "absoluteDiscount": { # Represents an amount of money with its currency type. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for this region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "price": { # Represents an amount of money with its currency type. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for this region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "regionCode": "A String", # Required. Immutable. The region to which this config applies.
+              "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in this region.
+            },
+          ],
+        },
+      ],
+      "productId": "A String", # Required. Immutable. The ID of the parent subscription this offer belongs to.
+      "regionalConfigs": [ # Required. The region-specific configuration of this offer. Must contain at least one entry.
+        { # Configuration for a subscription offer in a single region.
+          "newSubscriberAvailability": True or False, # Whether the subscription offer in the specified region is available for new subscribers. Existing subscribers will not have their subscription cancelled if this value is set to false. If not specified, this will default to false.
+          "regionCode": "A String", # Required. Immutable. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+        },
+      ],
+      "state": "A String", # Output only. The current state of this offer. Can be changed using Activate and Deactivate actions. NB: the base plan state supersedes this state, so an active offer may not be available if the base plan is not active.
+      "targeting": { # Defines the rule a user needs to satisfy to receive this offer. # The requirements that users need to fulfil to be eligible for this offer. Represents the requirements that Play will evaluate to decide whether an offer should be returned. Developers may further filter these offers themselves.
+        "acquisitionRule": { # Represents a targeting rule of the form: User never had {scope} before. # Offer targeting rule for new user acquisition.
+          "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "any subscription in app".
+            "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+          },
+        },
+        "upgradeRule": { # Represents a targeting rule of the form: User currently has {scope} [with billing period {billing_period}]. # Offer targeting rule for upgrading users' existing plans.
+          "billingPeriodDuration": "A String", # The specific billing period duration, specified in ISO 8601 format, that a user must be currently subscribed to to be eligible for this rule. If not specified, users subscribed to any billing period are matched.
+          "oncePerUser": True or False, # Limit this offer to only once per user. If set to true, a user can never be eligible for this offer again if they ever subscribed to this offer.
+          "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "specific subscription in app".
+            "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+          },
+        },
+      },
+    },
+  ],
+}
+
+ +
+ batchUpdateStates(packageName, productId, basePlanId, body=None, x__xgafv=None) +
Updates a batch of subscription offer states. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.
+
+Args:
+  packageName: string, Required. The parent app (package name) of the updated subscription offers. Must be equal to the package_name field on all the updated SubscriptionOffer resources. (required)
+  productId: string, Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this request spans multiple subscriptions, set this field to "-". Must be set. (required)
+  basePlanId: string, Required. The parent base plan (ID) for which the offers should be updated. May be specified as '-' to update offers from multiple base plans. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for BatchUpdateSubscriptionOfferStates.
+  "requests": [ # Required. The update request list of up to 100 elements. All requests must update different offers.
+    { # Request message to update the state of a subscription offer.
+      "activateSubscriptionOfferRequest": { # Request message for ActivateSubscriptionOffer. # Activates an offer. Once activated, the offer will be available to new subscribers.
+        "basePlanId": "A String", # Required. The parent base plan (ID) of the offer to activate.
+        "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+        "offerId": "A String", # Required. The unique offer ID of the offer to activate.
+        "packageName": "A String", # Required. The parent app (package name) of the offer to activate.
+        "productId": "A String", # Required. The parent subscription (ID) of the offer to activate.
+      },
+      "deactivateSubscriptionOfferRequest": { # Request message for DeactivateSubscriptionOffer. # Deactivates an offer. Once deactivated, the offer will become unavailable to new subscribers, but existing subscribers will maintain their subscription
+        "basePlanId": "A String", # Required. The parent base plan (ID) of the offer to deactivate.
+        "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+        "offerId": "A String", # Required. The unique offer ID of the offer to deactivate.
+        "packageName": "A String", # Required. The parent app (package name) of the offer to deactivate.
+        "productId": "A String", # Required. The parent subscription (ID) of the offer to deactivate.
+      },
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchUpdateSubscriptionOfferStates.
+  "subscriptionOffers": [ # The updated subscription offers list.
+    { # A single, temporary offer.
+      "basePlanId": "A String", # Required. Immutable. The ID of the base plan to which this offer is an extension.
+      "offerId": "A String", # Required. Immutable. Unique ID of this subscription offer. Must be unique within the base plan.
+      "offerTags": [ # List of up to 20 custom tags specified for this offer, and returned to the app through the billing library.
+        { # Represents a custom tag specified for base plans and subscription offers.
+          "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+        },
+      ],
+      "otherRegionsConfig": { # Configuration for any new locations Play may launch in specified on a subscription offer. # The configuration for any new locations Play may launch in the future.
+        "otherRegionsNewSubscriberAvailability": True or False, # Whether the subscription offer in any new locations Play may launch in the future. If not specified, this will default to false.
+      },
+      "packageName": "A String", # Required. Immutable. The package name of the app the parent subscription belongs to.
+      "phases": [ # Required. The phases of this subscription offer. Must contain at least one entry, and may contain at most five. Users will always receive all these phases in the specified order. Phases may not be added, removed, or reordered after initial creation.
+        { # A single phase of a subscription offer.
+          "duration": "A String", # Required. The duration of a single recurrence of this phase. Specified in ISO 8601 format.
+          "otherRegionsConfig": { # Configuration for any new locations Play may launch in for a single offer phase. # Pricing information for any new locations Play may launch in.
+            "absoluteDiscounts": { # Pricing information for any new locations Play may launch in. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for any new locations Play may launch in.
+              "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+            },
+            "otherRegionsPrices": { # Pricing information for any new locations Play may launch in. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for any new locations Play may launch in.
+              "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+            },
+            "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in any new locations Play may launch in.
+          },
+          "recurrenceCount": 42, # Required. The number of times this phase repeats. If this offer phase is not free, each recurrence charges the user the price of this offer phase.
+          "regionalConfigs": [ # Required. The region-specific configuration of this offer phase. This list must contain exactly one entry for each region for which the subscription offer has a regional config.
+            { # Configuration for a single phase of a subscription offer in a single region.
+              "absoluteDiscount": { # Represents an amount of money with its currency type. # The absolute amount of money subtracted from the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a $1 absolute discount for a phase of a duration of 3 months would correspond to a price of $2. The resulting price may not be smaller than the minimum price allowed for this region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "price": { # Represents an amount of money with its currency type. # The absolute price the user pays for this offer phase. The price must not be smaller than the minimum price allowed for this region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "regionCode": "A String", # Required. Immutable. The region to which this config applies.
+              "relativeDiscount": 3.14, # The fraction of the base plan price prorated over the phase duration that the user pays for this offer phase. For example, if the base plan price for this region is $12 for a period of 1 year, then a 50% discount for a phase of a duration of 3 months would correspond to a price of $1.50. The discount must be specified as a fraction strictly larger than 0 and strictly smaller than 1. The resulting price will be rounded to the nearest billable unit (e.g. cents for USD). The relative discount is considered invalid if the discounted price ends up being smaller than the minimum price allowed in this region.
+            },
+          ],
+        },
+      ],
+      "productId": "A String", # Required. Immutable. The ID of the parent subscription this offer belongs to.
+      "regionalConfigs": [ # Required. The region-specific configuration of this offer. Must contain at least one entry.
+        { # Configuration for a subscription offer in a single region.
+          "newSubscriberAvailability": True or False, # Whether the subscription offer in the specified region is available for new subscribers. Existing subscribers will not have their subscription cancelled if this value is set to false. If not specified, this will default to false.
+          "regionCode": "A String", # Required. Immutable. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+        },
+      ],
+      "state": "A String", # Output only. The current state of this offer. Can be changed using Activate and Deactivate actions. NB: the base plan state supersedes this state, so an active offer may not be available if the base plan is not active.
+      "targeting": { # Defines the rule a user needs to satisfy to receive this offer. # The requirements that users need to fulfil to be eligible for this offer. Represents the requirements that Play will evaluate to decide whether an offer should be returned. Developers may further filter these offers themselves.
+        "acquisitionRule": { # Represents a targeting rule of the form: User never had {scope} before. # Offer targeting rule for new user acquisition.
+          "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "any subscription in app".
+            "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+          },
+        },
+        "upgradeRule": { # Represents a targeting rule of the form: User currently has {scope} [with billing period {billing_period}]. # Offer targeting rule for upgrading users' existing plans.
+          "billingPeriodDuration": "A String", # The specific billing period duration, specified in ISO 8601 format, that a user must be currently subscribed to to be eligible for this rule. If not specified, users subscribed to any billing period are matched.
+          "oncePerUser": True or False, # Limit this offer to only once per user. If set to true, a user can never be eligible for this offer again if they ever subscribed to this offer.
+          "scope": { # Defines the scope of subscriptions which a targeting rule can match to target offers to users based on past or current entitlement. # Required. The scope of subscriptions this rule considers. Only allows "this subscription" and "specific subscription in app".
+            "specificSubscriptionInApp": "A String", # The scope of the current targeting rule is the subscription with the specified subscription ID. Must be a subscription within the same parent app.
+          },
+        },
+      },
+    },
+  ],
+}
+
+
close()
Close httplib2 connections.
@@ -421,6 +891,11 @@

Method Details

The object takes the form of: { # Request message for DeactivateSubscriptionOffer. + "basePlanId": "A String", # Required. The parent base plan (ID) of the offer to deactivate. + "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + "offerId": "A String", # Required. The unique offer ID of the offer to deactivate. + "packageName": "A String", # Required. The parent app (package name) of the offer to deactivate. + "productId": "A String", # Required. The parent subscription (ID) of the offer to deactivate. } x__xgafv: string, V1 error format. @@ -760,7 +1235,7 @@

Method Details

- patch(packageName, productId, basePlanId, offerId, body=None, regionsVersion_version=None, updateMask=None, x__xgafv=None) + patch(packageName, productId, basePlanId, offerId, allowMissing=None, body=None, latencyTolerance=None, regionsVersion_version=None, updateMask=None, x__xgafv=None)
Updates an existing subscription offer.
 
 Args:
@@ -856,6 +1331,12 @@ 

Method Details

}, } + allowMissing: boolean, Optional. If set to true, and the subscription offer with the given package_name, product_id, base_plan_id and offer_id doesn't exist, an offer will be created. If a new offer is created, update_mask is ignored. + latencyTolerance: string, Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + Allowed values + PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED - Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE - The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT - The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods. regionsVersion_version: string, Required. A string representing the version of available regions being used for the specified resource. Regional prices for the resource have to be specified according to the information published in [this article](https://support.google.com/googleplay/android-developer/answer/10532353). Each time the supported locations substantially change, the version will be incremented. Using this field will ensure that creating and updating the resource with an older region's version and set of regional prices and currencies will succeed even though a new version is available. The latest version is 2022/02. updateMask: string, Required. The list of fields to be updated. x__xgafv: string, V1 error format. diff --git a/docs/dyn/androidpublisher_v3.monetization.subscriptions.html b/docs/dyn/androidpublisher_v3.monetization.subscriptions.html index c4cbff8df36..481903e74b9 100644 --- a/docs/dyn/androidpublisher_v3.monetization.subscriptions.html +++ b/docs/dyn/androidpublisher_v3.monetization.subscriptions.html @@ -82,6 +82,12 @@

Instance Methods

archive(packageName, productId, body=None, x__xgafv=None)

Archives a subscription. Can only be done if at least one base plan was active in the past, and no base plan is available for new or existing subscribers currently. This action is irreversible, and the subscription ID will remain reserved.

+

+ batchGet(packageName, productIds=None, x__xgafv=None)

+

Reads one or more subscriptions.

+

+ batchUpdate(packageName, body=None, x__xgafv=None)

+

Updates a batch of subscriptions. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.

close()

Close httplib2 connections.

@@ -101,7 +107,7 @@

Instance Methods

list_next()

Retrieves the next page of results.

- patch(packageName, productId, body=None, regionsVersion_version=None, updateMask=None, x__xgafv=None)

+ patch(packageName, productId, allowMissing=None, body=None, latencyTolerance=None, regionsVersion_version=None, updateMask=None, x__xgafv=None)

Updates an existing subscription.

Method Details

@@ -200,6 +206,282 @@

Method Details

}
+
+ batchGet(packageName, productIds=None, x__xgafv=None) +
Reads one or more subscriptions.
+
+Args:
+  packageName: string, Required. The parent app (package name) for which the subscriptions should be retrieved. Must be equal to the package_name field on all the requests. (required)
+  productIds: string, Required. A list of up to 100 subscription product IDs to retrieve. All the IDs must be different. (repeated)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchGetSubscriptions endpoint.
+  "subscriptions": [ # The list of requested subscriptions, in the same order as the request.
+    { # A single subscription for an app.
+      "archived": True or False, # Output only. Whether this subscription is archived. Archived subscriptions are not available to any subscriber any longer, cannot be updated, and are not returned in list requests unless the show archived flag is passed in.
+      "basePlans": [ # The set of base plans for this subscription. Represents the prices and duration of the subscription if no other offers apply.
+        { # A single base plan for a subscription.
+          "autoRenewingBasePlanType": { # Represents a base plan that automatically renews at the end of its subscription period. # Set when the base plan automatically renews at a regular interval.
+            "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+            "gracePeriodDuration": "A String", # Grace period of the subscription, specified in ISO 8601 format. Acceptable values are P0D (zero days), P3D (3 days), P7D (7 days), P14D (14 days), and P30D (30 days). If not specified, a default value will be used based on the recurring period duration.
+            "legacyCompatible": True or False, # Whether the renewing base plan is backward compatible. The backward compatible base plan is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one renewing base plan can be marked as legacy compatible for a given subscription.
+            "legacyCompatibleSubscriptionOfferId": "A String", # Subscription offer id which is legacy compatible. The backward compatible subscription offer is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one subscription offer can be marked as legacy compatible for a given renewing base plan. To have no Subscription offer as legacy compatible set this field as empty string.
+            "prorationMode": "A String", # The proration mode for the base plan determines what happens when a user switches to this plan from another base plan. If unspecified, defaults to CHARGE_ON_NEXT_BILLING_DATE.
+            "resubscribeState": "A String", # Whether users should be able to resubscribe to this base plan in Google Play surfaces. Defaults to RESUBSCRIBE_STATE_ACTIVE if not specified.
+          },
+          "basePlanId": "A String", # Required. Immutable. The unique identifier of this base plan. Must be unique within the subscription, and conform with RFC-1034. That is, this ID can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 63 characters.
+          "offerTags": [ # List of up to 20 custom tags specified for this base plan, and returned to the app through the billing library. Subscription offers for this base plan will also receive these offer tags in the billing library.
+            { # Represents a custom tag specified for base plans and subscription offers.
+              "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+            },
+          ],
+          "otherRegionsConfig": { # Pricing information for any new locations Play may launch in. # Pricing information for any new locations Play may launch in the future. If omitted, the BasePlan will not be automatically available any new locations Play may launch in the future.
+            "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+            "newSubscriberAvailability": True or False, # Whether the base plan is available for new subscribers in any new locations Play may launch in. If not specified, this will default to false.
+            "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+          },
+          "prepaidBasePlanType": { # Represents a base plan that does not automatically renew at the end of the base plan, and must be manually renewed by the user. # Set when the base plan does not automatically renew at the end of the billing period.
+            "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+            "timeExtension": "A String", # Whether users should be able to extend this prepaid base plan in Google Play surfaces. Defaults to TIME_EXTENSION_ACTIVE if not specified.
+          },
+          "regionalConfigs": [ # Region-specific information for this base plan.
+            { # Configuration for a base plan specific to a region.
+              "newSubscriberAvailability": True or False, # Whether the base plan in the specified region is available for new subscribers. Existing subscribers will not have their subscription canceled if this value is set to false. If not specified, this will default to false.
+              "price": { # Represents an amount of money with its currency type. # The price of the base plan in the specified region. Must be set if the base plan is available to new subscribers. Must be set in the currency that is linked to the specified region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "regionCode": "A String", # Required. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+            },
+          ],
+          "state": "A String", # Output only. The state of the base plan, i.e. whether it's active. Draft and inactive base plans can be activated or deleted. Active base plans can be made inactive. Inactive base plans can be canceled. This field cannot be changed by updating the resource. Use the dedicated endpoints instead.
+        },
+      ],
+      "listings": [ # Required. List of localized listings for this subscription. Must contain at least an entry for the default language of the parent app.
+        { # The consumer-visible metadata of a subscription.
+          "benefits": [ # A list of benefits shown to the user on platforms such as the Play Store and in restoration flows in the language of this listing. Plain text. Ordered list of at most four benefits.
+            "A String",
+          ],
+          "description": "A String", # The description of this subscription in the language of this listing. Maximum length - 80 characters. Plain text.
+          "languageCode": "A String", # Required. The language of this listing, as defined by BCP-47, e.g. "en-US".
+          "title": "A String", # Required. The title of this subscription in the language of this listing. Plain text.
+        },
+      ],
+      "packageName": "A String", # Immutable. Package name of the parent app.
+      "productId": "A String", # Immutable. Unique product ID of the product. Unique within the parent app. Product IDs must be composed of lower-case letters (a-z), numbers (0-9), underscores (_) and dots (.). It must start with a lower-case letter or number, and be between 1 and 40 (inclusive) characters in length.
+      "taxAndComplianceSettings": { # Details about taxation, Google Play policy and legal compliance for subscription products. # Details about taxes and legal compliance.
+        "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+        "isTokenizedDigitalAsset": True or False, # Whether this subscription is declared as a product representing a tokenized digital asset.
+        "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+          "a_key": { # Specified details about taxation in a given geographical region.
+            "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+            "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+            "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+          },
+        },
+      },
+    },
+  ],
+}
+
+ +
+ batchUpdate(packageName, body=None, x__xgafv=None) +
Updates a batch of subscriptions. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.
+
+Args:
+  packageName: string, Required. The parent app (package name) for which the subscriptions should be updated. Must be equal to the package_name field on all the Subscription resources. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for BatchUpdateSubscription.
+  "requests": [ # Required. A list of update requests of up to 100 elements. All requests must update different subscriptions.
+    { # Request message for UpdateSubscription.
+      "allowMissing": True or False, # Optional. If set to true, and the subscription with the given package_name and product_id doesn't exist, the subscription will be created. If a new subscription is created, update_mask is ignored.
+      "latencyTolerance": "A String", # Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.
+      "regionsVersion": { # The version of the available regions being used for the specified resource. # Required. The version of the available regions being used for the subscription.
+        "version": "A String", # Required. A string representing the version of available regions being used for the specified resource. Regional prices for the resource have to be specified according to the information published in [this article](https://support.google.com/googleplay/android-developer/answer/10532353). Each time the supported locations substantially change, the version will be incremented. Using this field will ensure that creating and updating the resource with an older region's version and set of regional prices and currencies will succeed even though a new version is available. The latest version is 2022/02.
+      },
+      "subscription": { # A single subscription for an app. # Required. The subscription to update.
+        "archived": True or False, # Output only. Whether this subscription is archived. Archived subscriptions are not available to any subscriber any longer, cannot be updated, and are not returned in list requests unless the show archived flag is passed in.
+        "basePlans": [ # The set of base plans for this subscription. Represents the prices and duration of the subscription if no other offers apply.
+          { # A single base plan for a subscription.
+            "autoRenewingBasePlanType": { # Represents a base plan that automatically renews at the end of its subscription period. # Set when the base plan automatically renews at a regular interval.
+              "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+              "gracePeriodDuration": "A String", # Grace period of the subscription, specified in ISO 8601 format. Acceptable values are P0D (zero days), P3D (3 days), P7D (7 days), P14D (14 days), and P30D (30 days). If not specified, a default value will be used based on the recurring period duration.
+              "legacyCompatible": True or False, # Whether the renewing base plan is backward compatible. The backward compatible base plan is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one renewing base plan can be marked as legacy compatible for a given subscription.
+              "legacyCompatibleSubscriptionOfferId": "A String", # Subscription offer id which is legacy compatible. The backward compatible subscription offer is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one subscription offer can be marked as legacy compatible for a given renewing base plan. To have no Subscription offer as legacy compatible set this field as empty string.
+              "prorationMode": "A String", # The proration mode for the base plan determines what happens when a user switches to this plan from another base plan. If unspecified, defaults to CHARGE_ON_NEXT_BILLING_DATE.
+              "resubscribeState": "A String", # Whether users should be able to resubscribe to this base plan in Google Play surfaces. Defaults to RESUBSCRIBE_STATE_ACTIVE if not specified.
+            },
+            "basePlanId": "A String", # Required. Immutable. The unique identifier of this base plan. Must be unique within the subscription, and conform with RFC-1034. That is, this ID can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 63 characters.
+            "offerTags": [ # List of up to 20 custom tags specified for this base plan, and returned to the app through the billing library. Subscription offers for this base plan will also receive these offer tags in the billing library.
+              { # Represents a custom tag specified for base plans and subscription offers.
+                "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+              },
+            ],
+            "otherRegionsConfig": { # Pricing information for any new locations Play may launch in. # Pricing information for any new locations Play may launch in the future. If omitted, the BasePlan will not be automatically available any new locations Play may launch in the future.
+              "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "newSubscriberAvailability": True or False, # Whether the base plan is available for new subscribers in any new locations Play may launch in. If not specified, this will default to false.
+              "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+            },
+            "prepaidBasePlanType": { # Represents a base plan that does not automatically renew at the end of the base plan, and must be manually renewed by the user. # Set when the base plan does not automatically renew at the end of the billing period.
+              "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+              "timeExtension": "A String", # Whether users should be able to extend this prepaid base plan in Google Play surfaces. Defaults to TIME_EXTENSION_ACTIVE if not specified.
+            },
+            "regionalConfigs": [ # Region-specific information for this base plan.
+              { # Configuration for a base plan specific to a region.
+                "newSubscriberAvailability": True or False, # Whether the base plan in the specified region is available for new subscribers. Existing subscribers will not have their subscription canceled if this value is set to false. If not specified, this will default to false.
+                "price": { # Represents an amount of money with its currency type. # The price of the base plan in the specified region. Must be set if the base plan is available to new subscribers. Must be set in the currency that is linked to the specified region.
+                  "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                  "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                  "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+                },
+                "regionCode": "A String", # Required. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+              },
+            ],
+            "state": "A String", # Output only. The state of the base plan, i.e. whether it's active. Draft and inactive base plans can be activated or deleted. Active base plans can be made inactive. Inactive base plans can be canceled. This field cannot be changed by updating the resource. Use the dedicated endpoints instead.
+          },
+        ],
+        "listings": [ # Required. List of localized listings for this subscription. Must contain at least an entry for the default language of the parent app.
+          { # The consumer-visible metadata of a subscription.
+            "benefits": [ # A list of benefits shown to the user on platforms such as the Play Store and in restoration flows in the language of this listing. Plain text. Ordered list of at most four benefits.
+              "A String",
+            ],
+            "description": "A String", # The description of this subscription in the language of this listing. Maximum length - 80 characters. Plain text.
+            "languageCode": "A String", # Required. The language of this listing, as defined by BCP-47, e.g. "en-US".
+            "title": "A String", # Required. The title of this subscription in the language of this listing. Plain text.
+          },
+        ],
+        "packageName": "A String", # Immutable. Package name of the parent app.
+        "productId": "A String", # Immutable. Unique product ID of the product. Unique within the parent app. Product IDs must be composed of lower-case letters (a-z), numbers (0-9), underscores (_) and dots (.). It must start with a lower-case letter or number, and be between 1 and 40 (inclusive) characters in length.
+        "taxAndComplianceSettings": { # Details about taxation, Google Play policy and legal compliance for subscription products. # Details about taxes and legal compliance.
+          "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+          "isTokenizedDigitalAsset": True or False, # Whether this subscription is declared as a product representing a tokenized digital asset.
+          "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+            "a_key": { # Specified details about taxation in a given geographical region.
+              "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+              "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+              "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+            },
+          },
+        },
+      },
+      "updateMask": "A String", # Required. The list of fields to be updated.
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BatchUpdateSubscription.
+  "subscriptions": [ # The updated subscriptions list.
+    { # A single subscription for an app.
+      "archived": True or False, # Output only. Whether this subscription is archived. Archived subscriptions are not available to any subscriber any longer, cannot be updated, and are not returned in list requests unless the show archived flag is passed in.
+      "basePlans": [ # The set of base plans for this subscription. Represents the prices and duration of the subscription if no other offers apply.
+        { # A single base plan for a subscription.
+          "autoRenewingBasePlanType": { # Represents a base plan that automatically renews at the end of its subscription period. # Set when the base plan automatically renews at a regular interval.
+            "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+            "gracePeriodDuration": "A String", # Grace period of the subscription, specified in ISO 8601 format. Acceptable values are P0D (zero days), P3D (3 days), P7D (7 days), P14D (14 days), and P30D (30 days). If not specified, a default value will be used based on the recurring period duration.
+            "legacyCompatible": True or False, # Whether the renewing base plan is backward compatible. The backward compatible base plan is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one renewing base plan can be marked as legacy compatible for a given subscription.
+            "legacyCompatibleSubscriptionOfferId": "A String", # Subscription offer id which is legacy compatible. The backward compatible subscription offer is returned by the Google Play Billing Library deprecated method querySkuDetailsAsync(). Only one subscription offer can be marked as legacy compatible for a given renewing base plan. To have no subscription offer marked as legacy compatible, set this field to an empty string.
+            "prorationMode": "A String", # The proration mode for the base plan determines what happens when a user switches to this plan from another base plan. If unspecified, defaults to CHARGE_ON_NEXT_BILLING_DATE.
+            "resubscribeState": "A String", # Whether users should be able to resubscribe to this base plan in Google Play surfaces. Defaults to RESUBSCRIBE_STATE_ACTIVE if not specified.
+          },
+          "basePlanId": "A String", # Required. Immutable. The unique identifier of this base plan. Must be unique within the subscription, and conform with RFC-1034. That is, this ID can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 63 characters.
+          "offerTags": [ # List of up to 20 custom tags specified for this base plan, and returned to the app through the billing library. Subscription offers for this base plan will also receive these offer tags in the billing library.
+            { # Represents a custom tag specified for base plans and subscription offers.
+              "tag": "A String", # Must conform with RFC-1034. That is, this string can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 20 characters.
+            },
+          ],
+          "otherRegionsConfig": { # Pricing information for any new locations Play may launch in. # Pricing information for any new locations Play may launch in the future. If omitted, the BasePlan will not be automatically available in any new locations Play may launch in the future.
+            "eurPrice": { # Represents an amount of money with its currency type. # Required. Price in EUR to use for any new locations Play may launch in.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+            "newSubscriberAvailability": True or False, # Whether the base plan is available for new subscribers in any new locations Play may launch in. If not specified, this will default to false.
+            "usdPrice": { # Represents an amount of money with its currency type. # Required. Price in USD to use for any new locations Play may launch in.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+          },
+          "prepaidBasePlanType": { # Represents a base plan that does not automatically renew at the end of the base plan, and must be manually renewed by the user. # Set when the base plan does not automatically renew at the end of the billing period.
+            "billingPeriodDuration": "A String", # Required. Subscription period, specified in ISO 8601 format. For a list of acceptable billing periods, refer to the help center.
+            "timeExtension": "A String", # Whether users should be able to extend this prepaid base plan in Google Play surfaces. Defaults to TIME_EXTENSION_ACTIVE if not specified.
+          },
+          "regionalConfigs": [ # Region-specific information for this base plan.
+            { # Configuration for a base plan specific to a region.
+              "newSubscriberAvailability": True or False, # Whether the base plan in the specified region is available for new subscribers. Existing subscribers will not have their subscription canceled if this value is set to false. If not specified, this will default to false.
+              "price": { # Represents an amount of money with its currency type. # The price of the base plan in the specified region. Must be set if the base plan is available to new subscribers. Must be set in the currency that is linked to the specified region.
+                "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+                "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+                "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+              },
+              "regionCode": "A String", # Required. Region code this configuration applies to, as defined by ISO 3166-2, e.g. "US".
+            },
+          ],
+          "state": "A String", # Output only. The state of the base plan, i.e. whether it's active. Draft and inactive base plans can be activated or deleted. Active base plans can be made inactive. Inactive base plans can be canceled. This field cannot be changed by updating the resource. Use the dedicated endpoints instead.
+        },
+      ],
+      "listings": [ # Required. List of localized listings for this subscription. Must contain at least an entry for the default language of the parent app.
+        { # The consumer-visible metadata of a subscription.
+          "benefits": [ # A list of benefits shown to the user on platforms such as the Play Store and in restoration flows in the language of this listing. Plain text. Ordered list of at most four benefits.
+            "A String",
+          ],
+          "description": "A String", # The description of this subscription in the language of this listing. Maximum length - 80 characters. Plain text.
+          "languageCode": "A String", # Required. The language of this listing, as defined by BCP-47, e.g. "en-US".
+          "title": "A String", # Required. The title of this subscription in the language of this listing. Plain text.
+        },
+      ],
+      "packageName": "A String", # Immutable. Package name of the parent app.
+      "productId": "A String", # Immutable. Unique product ID of the product. Unique within the parent app. Product IDs must be composed of lower-case letters (a-z), numbers (0-9), underscores (_) and dots (.). It must start with a lower-case letter or number, and be between 1 and 40 (inclusive) characters in length.
+      "taxAndComplianceSettings": { # Details about taxation, Google Play policy and legal compliance for subscription products. # Details about taxes and legal compliance.
+        "eeaWithdrawalRightType": "A String", # Digital content or service classification for products distributed to users in the European Economic Area (EEA). The withdrawal regime under EEA consumer laws depends on this classification. Refer to the [Help Center article](https://support.google.com/googleplay/android-developer/answer/10463498) for more information.
+        "isTokenizedDigitalAsset": True or False, # Whether this subscription is declared as a product representing a tokenized digital asset.
+        "taxRateInfoByRegionCode": { # A mapping from region code to tax rate details. The keys are region codes as defined by Unicode's "CLDR".
+          "a_key": { # Specified details about taxation in a given geographical region.
+            "eligibleForStreamingServiceTaxRate": True or False, # You must tell us if your app contains streaming products to correctly charge US state and local sales tax. Field only supported in United States.
+            "streamingTaxType": "A String", # To collect communications or amusement taxes in the United States, choose the appropriate tax category. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498#streaming_tax).
+            "taxTier": "A String", # Tax tier to specify reduced tax rate. Developers who sell digital news, magazines, newspapers, books, or audiobooks in various regions may be eligible for reduced tax rates. [Learn more](https://support.google.com/googleplay/android-developer/answer/10463498).
+          },
+        },
+      },
+    },
+  ],
+}
+
+
close()
Close httplib2 connections.
@@ -589,7 +871,7 @@

Method Details

- patch(packageName, productId, body=None, regionsVersion_version=None, updateMask=None, x__xgafv=None) + patch(packageName, productId, allowMissing=None, body=None, latencyTolerance=None, regionsVersion_version=None, updateMask=None, x__xgafv=None)
Updates an existing subscription.
 
 Args:
@@ -672,6 +954,12 @@ 

Method Details

}, } + allowMissing: boolean, Optional. If set to true, and the subscription with the given package_name and product_id doesn't exist, the subscription will be created. If a new subscription is created, update_mask is ignored. + latencyTolerance: string, Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive. + Allowed values + PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED - Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE - The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour. + PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT - The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods. regionsVersion_version: string, Required. A string representing the version of available regions being used for the specified resource. Regional prices for the resource have to be specified according to the information published in [this article](https://support.google.com/googleplay/android-developer/answer/10532353). Each time the supported locations substantially change, the version will be incremented. Using this field will ensure that creating and updating the resource with an older region's version and set of regional prices and currencies will succeed even though a new version is available. The latest version is 2022/02. updateMask: string, Required. The list of fields to be updated. x__xgafv: string, V1 error format. diff --git a/docs/dyn/apphub_v1.html b/docs/dyn/apphub_v1.html new file mode 100644 index 00000000000..a58e94025b6 --- /dev/null +++ b/docs/dyn/apphub_v1.html @@ -0,0 +1,111 @@ + + + +

App Hub API

+

Instance Methods

+

+ projects() +

+

Returns the projects Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ new_batch_http_request()

+

Create a BatchHttpRequest object based on the discovery document.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ new_batch_http_request() +
Create a BatchHttpRequest object based on the discovery document.
+
+                Args:
+                  callback: callable, A callback to be called for each response, of the
+                    form callback(id, response, exception). The first parameter is the
+                    request id, and the second is the deserialized response object. The
+                    third is an apiclient.errors.HttpError exception object if an HTTP
+                    error occurred while processing the request, or None if no error
+                    occurred.
+
+                Returns:
+                  A BatchHttpRequest object based on the discovery document.
+                
+
+ + \ No newline at end of file diff --git a/docs/dyn/apphub_v1.projects.html b/docs/dyn/apphub_v1.projects.html new file mode 100644 index 00000000000..c17de401ae6 --- /dev/null +++ b/docs/dyn/apphub_v1.projects.html @@ -0,0 +1,91 @@ + + + +

App Hub API . projects

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/apphub_v1.projects.locations.applications.html b/docs/dyn/apphub_v1.projects.locations.applications.html new file mode 100644 index 00000000000..e4868e39844 --- /dev/null +++ b/docs/dyn/apphub_v1.projects.locations.applications.html @@ -0,0 +1,258 @@ + + + +

App Hub API . projects . locations . applications

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

+

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

+

+ setIamPolicy(resource, body=None, x__xgafv=None)

+

Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.

+

+ testIamPermissions(resource, body=None, x__xgafv=None)

+

Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None) +
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
+
+Args:
+  resource: string, REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
+  options_requestedPolicyVersion: integer, Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM 
documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
+ +
+ setIamPolicy(resource, body=None, x__xgafv=None) +
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
+
+Args:
+  resource: string, REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for `SetIamPolicy` method.
+  "policy": { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM 
documentation](https://cloud.google.com/iam/docs/). # REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them.
+    "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+      { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+        "auditLogConfigs": [ # The configuration for logging of each type of permission.
+          { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+            "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+              "A String",
+            ],
+            "logType": "A String", # The log type that this config enables.
+          },
+        ],
+        "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+      },
+    ],
+    "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+      { # Associates `members`, or principals, with a `role`.
+        "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+          "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+          "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+          "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.
+        },
+        "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.
+          "A String",
+        ],
+        "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+      },
+    ],
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+    "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  },
+  "updateMask": "A String", # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM 
documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow entering the expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
+ +
+ testIamPermissions(resource, body=None, x__xgafv=None) +
Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
+
+Args:
+  resource: string, REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for `TestIamPermissions` method.
+  "permissions": [ # The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for `TestIamPermissions` method.
+  "permissions": [ # A subset of `TestPermissionsRequest.permissions` that the caller is allowed.
+    "A String",
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/apphub_v1.projects.locations.html b/docs/dyn/apphub_v1.projects.locations.html new file mode 100644 index 00000000000..a06447ac2d8 --- /dev/null +++ b/docs/dyn/apphub_v1.projects.locations.html @@ -0,0 +1,181 @@ + + + +

App Hub API . projects . locations

+

Instance Methods

+

+ applications() +

+

Returns the applications Resource.

+ +

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets information about a location.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists information about the supported locations for this service.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets information about a location.
+
+Args:
+  name: string, Resource name for the location. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A resource that represents a Google Cloud location.
+  "displayName": "A String", # The friendly name for this location, typically a nearby city name. For example, "Tokyo".
+  "labels": { # Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
+    "a_key": "A String",
+  },
+  "locationId": "A String", # The canonical id for this location. For example: `"us-east1"`.
+  "metadata": { # Service-specific metadata. For example the available capacity at the given location.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists information about the supported locations for this service.
+
+Args:
+  name: string, The resource that owns the locations collection, if applicable. (required)
+  filter: string, A filter to narrow down results to a preferred subset. The filtering language accepts strings like `"displayName=tokyo"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).
+  pageSize: integer, The maximum number of results to return. If not set, the service selects a default.
+  pageToken: string, A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Locations.ListLocations.
+  "locations": [ # A list of locations that matches the specified filter in the request.
+    { # A resource that represents a Google Cloud location.
+      "displayName": "A String", # The friendly name for this location, typically a nearby city name. For example, "Tokyo".
+      "labels": { # Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
+        "a_key": "A String",
+      },
+      "locationId": "A String", # The canonical id for this location. For example: `"us-east1"`.
+      "metadata": { # Service-specific metadata. For example the available capacity at the given location.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
+    },
+  ],
+  "nextPageToken": "A String", # The standard List next-page token.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/apphub_v1.projects.locations.operations.html b/docs/dyn/apphub_v1.projects.locations.operations.html new file mode 100644 index 00000000000..f04c7522f39 --- /dev/null +++ b/docs/dyn/apphub_v1.projects.locations.operations.html @@ -0,0 +1,235 @@ + + + +

App Hub API . projects . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/baremetalsolution_v2.projects.locations.instances.html b/docs/dyn/baremetalsolution_v2.projects.locations.instances.html index ca922b9a739..122bc928f1a 100644 --- a/docs/dyn/baremetalsolution_v2.projects.locations.instances.html +++ b/docs/dyn/baremetalsolution_v2.projects.locations.instances.html @@ -95,6 +95,9 @@

Instance Methods

list_next()

Retrieves the next page of results.

+

+ loadAuthInfo(name, x__xgafv=None)

+

Load auth info for a server.

patch(name, body=None, updateMask=None, x__xgafv=None)

Update details of a single server.

@@ -261,6 +264,7 @@

Method Details

"hyperthreadingEnabled": True or False, # True if you enable hyperthreading for the server, otherwise false. The default value is false. "id": "A String", # Output only. An identifier for the `Instance`, generated by the backend. "interactiveSerialConsoleEnabled": True or False, # Output only. True if the interactive serial console feature is enabled for the instance, false otherwise. The default value is false. + "kmsKeyVersion": "A String", # Optional. Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. Format is `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}/cryptoKeyVersions/{version}`. "labels": { # Labels as key value pairs. "a_key": "A String", }, @@ -291,7 +295,7 @@

Method Details

"multiprotocolType": "A String", # The LUN multiprotocol type ensures the characteristics of the LUN are optimized for each operating system. "name": "A String", # Output only. The name of the LUN. "shareable": True or False, # Display if this LUN can be shared between multiple physical servers. - "sizeGb": "A String", # The size of this LUN, in gigabytes. + "sizeGb": "A String", # The size of this LUN, in GiB. "state": "A String", # The state of this storage volume. "storageType": "A String", # The storage type for this LUN. "storageVolume": "A String", # Display the storage volume for this LUN. @@ -360,6 +364,9 @@

Method Details

], "osImage": "A String", # The OS image currently installed on the server. "pod": "A String", # Immutable. Pod name. Pod is an independent part of infrastructure. Instance can only be connected to the assets (networks, volumes) allocated in the same pod. + "sshKeys": [ # Optional. List of SSH Keys used during instance provisioning. + "A String", + ], "state": "A String", # Output only. The state of the server. "updateTime": "A String", # Output only. Update a time stamp. "volumes": [ # Input only. List of Volumes to attach to this Instance on creation. This field won't be populated in Get/List responses. @@ -428,6 +435,7 @@

Method Details

"hyperthreadingEnabled": True or False, # True if you enable hyperthreading for the server, otherwise false. The default value is false. "id": "A String", # Output only. An identifier for the `Instance`, generated by the backend. "interactiveSerialConsoleEnabled": True or False, # Output only. True if the interactive serial console feature is enabled for the instance, false otherwise. The default value is false. + "kmsKeyVersion": "A String", # Optional. Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. Format is `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}/cryptoKeyVersions/{version}`. "labels": { # Labels as key value pairs. "a_key": "A String", }, @@ -458,7 +466,7 @@

Method Details

"multiprotocolType": "A String", # The LUN multiprotocol type ensures the characteristics of the LUN are optimized for each operating system. "name": "A String", # Output only. The name of the LUN. "shareable": True or False, # Display if this LUN can be shared between multiple physical servers. - "sizeGb": "A String", # The size of this LUN, in gigabytes. + "sizeGb": "A String", # The size of this LUN, in GiB. "state": "A String", # The state of this storage volume. "storageType": "A String", # The storage type for this LUN. "storageVolume": "A String", # Display the storage volume for this LUN. @@ -527,6 +535,9 @@

Method Details

], "osImage": "A String", # The OS image currently installed on the server. "pod": "A String", # Immutable. Pod name. Pod is an independent part of infrastructure. Instance can only be connected to the assets (networks, volumes) allocated in the same pod. + "sshKeys": [ # Optional. List of SSH Keys used during instance provisioning. + "A String", + ], "state": "A String", # Output only. The state of the server. "updateTime": "A String", # Output only. Update a time stamp. "volumes": [ # Input only. List of Volumes to attach to this Instance on creation. This field won't be populated in Get/List responses. @@ -590,6 +601,36 @@

Method Details

+
+ loadAuthInfo(name, x__xgafv=None) +
Load auth info for a server.
+
+Args:
+  name: string, Required. Name of the server. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for LoadInstanceAuthInfo.
+  "sshKeys": [ # List of ssh keys.
+    { # An SSH key, used for authorizing with the interactive serial console feature.
+      "name": "A String", # Output only. The name of this SSH key. Currently, the only valid value for the location is "global".
+      "publicKey": "A String", # The public SSH key. This must be in OpenSSH .authorized_keys format.
+    },
+  ],
+  "userAccounts": { # Map of username to the user account info.
+    "a_key": { # User account provisioned for the customer.
+      "encryptedPassword": "A String", # Encrypted initial password value.
+      "kmsKeyVersion": "A String", # KMS CryptoKey Version used to encrypt the password.
+    },
+  },
+}
+
+
patch(name, body=None, updateMask=None, x__xgafv=None)
Update details of a single server.
@@ -605,6 +646,7 @@ 

Method Details

"hyperthreadingEnabled": True or False, # True if you enable hyperthreading for the server, otherwise false. The default value is false. "id": "A String", # Output only. An identifier for the `Instance`, generated by the backend. "interactiveSerialConsoleEnabled": True or False, # Output only. True if the interactive serial console feature is enabled for the instance, false otherwise. The default value is false. + "kmsKeyVersion": "A String", # Optional. Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. Format is `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}/cryptoKeyVersions/{version}`. "labels": { # Labels as key value pairs. "a_key": "A String", }, @@ -635,7 +677,7 @@

Method Details

"multiprotocolType": "A String", # The LUN multiprotocol type ensures the characteristics of the LUN are optimized for each operating system. "name": "A String", # Output only. The name of the LUN. "shareable": True or False, # Display if this LUN can be shared between multiple physical servers. - "sizeGb": "A String", # The size of this LUN, in gigabytes. + "sizeGb": "A String", # The size of this LUN, in GiB. "state": "A String", # The state of this storage volume. "storageType": "A String", # The storage type for this LUN. "storageVolume": "A String", # Display the storage volume for this LUN. @@ -704,6 +746,9 @@

Method Details

], "osImage": "A String", # The OS image currently installed on the server. "pod": "A String", # Immutable. Pod name. Pod is an independent part of infrastructure. Instance can only be connected to the assets (networks, volumes) allocated in the same pod. + "sshKeys": [ # Optional. List of SSH Keys used during instance provisioning. + "A String", + ], "state": "A String", # Output only. The state of the server. "updateTime": "A String", # Output only. Update a time stamp. "volumes": [ # Input only. List of Volumes to attach to this Instance on creation. This field won't be populated in Get/List responses. @@ -803,6 +848,7 @@

Method Details

"hyperthreadingEnabled": True or False, # True if you enable hyperthreading for the server, otherwise false. The default value is false. "id": "A String", # Output only. An identifier for the `Instance`, generated by the backend. "interactiveSerialConsoleEnabled": True or False, # Output only. True if the interactive serial console feature is enabled for the instance, false otherwise. The default value is false. + "kmsKeyVersion": "A String", # Optional. Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. Format is `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}/cryptoKeyVersions/{version}`. "labels": { # Labels as key value pairs. "a_key": "A String", }, @@ -833,7 +879,7 @@

Method Details

"multiprotocolType": "A String", # The LUN multiprotocol type ensures the characteristics of the LUN are optimized for each operating system. "name": "A String", # Output only. The name of the LUN. "shareable": True or False, # Display if this LUN can be shared between multiple physical servers. - "sizeGb": "A String", # The size of this LUN, in gigabytes. + "sizeGb": "A String", # The size of this LUN, in GiB. "state": "A String", # The state of this storage volume. "storageType": "A String", # The storage type for this LUN. "storageVolume": "A String", # Display the storage volume for this LUN. @@ -902,6 +948,9 @@

Method Details

], "osImage": "A String", # The OS image currently installed on the server. "pod": "A String", # Immutable. Pod name. Pod is an independent part of infrastructure. Instance can only be connected to the assets (networks, volumes) allocated in the same pod. + "sshKeys": [ # Optional. List of SSH Keys used during instance provisioning. + "A String", + ], "state": "A String", # Output only. The state of the server. "updateTime": "A String", # Output only. Update a time stamp. "volumes": [ # Input only. List of Volumes to attach to this Instance on creation. This field won't be populated in Get/List responses. diff --git a/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html b/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html index 02959874c0d..99f77ec8523 100644 --- a/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html +++ b/docs/dyn/baremetalsolution_v2.projects.locations.provisioningConfigs.html @@ -120,6 +120,7 @@

Method Details

"hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. "id": "A String", # A transient unique identifier to identify an instance within a ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) + "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. { # Each logical interface represents a logical abstraction of the underlying physical interface (e.g. bond, nic) of the instance. Each logical interface can effectively map to multiple network-IP pairs and still be mapped to one underlying physical interface. "interfaceIndex": 42, # The index of the logical interface mapping to the index of the hardware bond or nic on the chosen network template. This field is deprecated. @@ -238,6 +239,7 @@

Method Details

"hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. "id": "A String", # A transient unique identifier to identify an instance within a ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) + "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. { # Each logical interface represents a logical abstraction of the underlying physical interface (e.g. bond, nic) of the instance. Each logical interface can effectively map to multiple network-IP pairs and still be mapped to one underlying physical interface. "interfaceIndex": 42, # The index of the logical interface mapping to the index of the hardware bond or nic on the chosen network template. This field is deprecated. @@ -362,6 +364,7 @@

Method Details

"hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. "id": "A String", # A transient unique identifier to identify an instance within a ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) + "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. { # Each logical interface represents a logical abstraction of the underlying physical interface (e.g. bond, nic) of the instance. Each logical interface can effectively map to multiple network-IP pairs and still be mapped to one underlying physical interface. "interfaceIndex": 42, # The index of the logical interface mapping to the index of the hardware bond or nic on the chosen network template. This field is deprecated. @@ -481,6 +484,7 @@

Method Details

"hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. "id": "A String", # A transient unique identifier to identify an instance within a ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) + "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. { # Each logical interface represents a logical abstraction of the underlying physical interface (e.g. bond, nic) of the instance. Each logical interface can effectively map to multiple network-IP pairs and still be mapped to one underlying physical interface. "interfaceIndex": 42, # The index of the logical interface mapping to the index of the hardware bond or nic on the chosen network template. This field is deprecated. @@ -600,6 +604,7 @@

Method Details

"hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. "id": "A String", # A transient unique identifier to identify an instance within a ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) + "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. { # Each logical interface represents a logical abstraction of the underlying physical interface (e.g. bond, nic) of the instance. Each logical interface can effectively map to multiple network-IP pairs and still be mapped to one underlying physical interface. "interfaceIndex": 42, # The index of the logical interface mapping to the index of the hardware bond or nic on the chosen network template. This field is deprecated. @@ -721,6 +726,7 @@

Method Details

"hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. "id": "A String", # A transient unique identifier to identify an instance within a ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) + "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. { # Each logical interface represents a logical abstraction of the underlying physical interface (e.g. bond, nic) of the instance. Each logical interface can effectively map to multiple network-IP pairs and still be mapped to one underlying physical interface. "interfaceIndex": 42, # The index of the logical interface mapping to the index of the hardware bond or nic on the chosen network template. This field is deprecated. @@ -840,6 +846,7 @@

Method Details

"hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. "id": "A String", # A transient unique identifier to identify an instance within a ProvisioningConfig request. "instanceType": "A String", # Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations) + "kmsKeyVersion": "A String", # Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. "logicalInterfaces": [ # List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true. { # Each logical interface represents a logical abstraction of the underlying physical interface (e.g. bond, nic) of the instance. Each logical interface can effectively map to multiple network-IP pairs and still be mapped to one underlying physical interface. "interfaceIndex": 42, # The index of the logical interface mapping to the index of the hardware bond or nic on the chosen network template. This field is deprecated. diff --git a/docs/dyn/baremetalsolution_v2.projects.locations.volumes.luns.html b/docs/dyn/baremetalsolution_v2.projects.locations.volumes.luns.html index 3b025ee304c..cea3d93b144 100644 --- a/docs/dyn/baremetalsolution_v2.projects.locations.volumes.luns.html +++ b/docs/dyn/baremetalsolution_v2.projects.locations.volumes.luns.html @@ -160,7 +160,7 @@

Method Details

"multiprotocolType": "A String", # The LUN multiprotocol type ensures the characteristics of the LUN are optimized for each operating system. "name": "A String", # Output only. The name of the LUN. "shareable": True or False, # Display if this LUN can be shared between multiple physical servers. - "sizeGb": "A String", # The size of this LUN, in gigabytes. + "sizeGb": "A String", # The size of this LUN, in GiB. "state": "A String", # The state of this storage volume. "storageType": "A String", # The storage type for this LUN. "storageVolume": "A String", # Display the storage volume for this LUN. @@ -196,7 +196,7 @@

Method Details

"multiprotocolType": "A String", # The LUN multiprotocol type ensures the characteristics of the LUN are optimized for each operating system. "name": "A String", # Output only. The name of the LUN. "shareable": True or False, # Display if this LUN can be shared between multiple physical servers. - "sizeGb": "A String", # The size of this LUN, in gigabytes. + "sizeGb": "A String", # The size of this LUN, in GiB. "state": "A String", # The state of this storage volume. "storageType": "A String", # The storage type for this LUN. "storageVolume": "A String", # Display the storage volume for this LUN. diff --git a/docs/dyn/batch_v1.projects.locations.jobs.html b/docs/dyn/batch_v1.projects.locations.jobs.html index 73cfbe03485..78613010760 100644 --- a/docs/dyn/batch_v1.projects.locations.jobs.html +++ b/docs/dyn/batch_v1.projects.locations.jobs.html @@ -187,8 +187,6 @@

Method Details

"a_key": "A String", }, "logsPolicy": { # LogsPolicy describes how outputs from a Job's Tasks (stdout/stderr) will be preserved. # Log preservation policy for the Job. - "cloudLoggingOption": { # CloudLoggingOption contains additional settings for cloud logging generated by Batch job. # Optional. Additional settings for Cloud Logging. It will only take effect when the destination of LogsPolicy is set to CLOUD_LOGGING. - }, "destination": "A String", # Where logs should be saved. "logsPath": "A String", # The path to which logs are saved when the destination = PATH. This can be a local file path on the VM, or under the mount point of a Persistent Disk or Filestore, or a Cloud Storage path. }, @@ -308,6 +306,7 @@

Method Details

"commands": [ # Overrides the `CMD` specified in the container. If there is an ENTRYPOINT (either in the container image or with the entrypoint field below) then commands are appended as arguments to the ENTRYPOINT. "A String", ], + "enableImageStreaming": True or False, # Optional. If set to true, container will run with Image streaming. The container runtime will be changed to containerd instead of docker. Currently, only imageUri, commands, entrypoint and volumes are supported and any other fields will be ignored. Please refer [here](https://github.com/GoogleCloudPlatform/batch-samples/tree/main/api-samples/image-streaming) for the feature requirements and limitations. "entrypoint": "A String", # Overrides the `ENTRYPOINT` specified in the container. "imageUri": "A String", # The URI to pull the container image from. "options": "A String", # Arbitrary additional options to include in the "docker run" command when running this container, e.g. "--network host". @@ -449,8 +448,6 @@

Method Details

"a_key": "A String", }, "logsPolicy": { # LogsPolicy describes how outputs from a Job's Tasks (stdout/stderr) will be preserved. # Log preservation policy for the Job. - "cloudLoggingOption": { # CloudLoggingOption contains additional settings for cloud logging generated by Batch job. # Optional. Additional settings for Cloud Logging. It will only take effect when the destination of LogsPolicy is set to CLOUD_LOGGING. - }, "destination": "A String", # Where logs should be saved. "logsPath": "A String", # The path to which logs are saved when the destination = PATH. This can be a local file path on the VM, or under the mount point of a Persistent Disk or Filestore, or a Cloud Storage path. }, @@ -570,6 +567,7 @@

Method Details

"commands": [ # Overrides the `CMD` specified in the container. If there is an ENTRYPOINT (either in the container image or with the entrypoint field below) then commands are appended as arguments to the ENTRYPOINT. "A String", ], + "enableImageStreaming": True or False, # Optional. If set to true, container will run with Image streaming. The container runtime will be changed to containerd instead of docker. Currently, only imageUri, commands, entrypoint and volumes are supported and any other fields will be ignored. Please refer [here](https://github.com/GoogleCloudPlatform/batch-samples/tree/main/api-samples/image-streaming) for the feature requirements and limitations. "entrypoint": "A String", # Overrides the `ENTRYPOINT` specified in the container. "imageUri": "A String", # The URI to pull the container image from. "options": "A String", # Arbitrary additional options to include in the "docker run" command when running this container, e.g. "--network host". @@ -753,8 +751,6 @@

Method Details

"a_key": "A String", }, "logsPolicy": { # LogsPolicy describes how outputs from a Job's Tasks (stdout/stderr) will be preserved. # Log preservation policy for the Job. - "cloudLoggingOption": { # CloudLoggingOption contains additional settings for cloud logging generated by Batch job. # Optional. Additional settings for Cloud Logging. It will only take effect when the destination of LogsPolicy is set to CLOUD_LOGGING. - }, "destination": "A String", # Where logs should be saved. "logsPath": "A String", # The path to which logs are saved when the destination = PATH. This can be a local file path on the VM, or under the mount point of a Persistent Disk or Filestore, or a Cloud Storage path. }, @@ -874,6 +870,7 @@

Method Details

"commands": [ # Overrides the `CMD` specified in the container. If there is an ENTRYPOINT (either in the container image or with the entrypoint field below) then commands are appended as arguments to the ENTRYPOINT. "A String", ], + "enableImageStreaming": True or False, # Optional. If set to true, container will run with Image streaming. The container runtime will be changed to containerd instead of docker. Currently, only imageUri, commands, entrypoint and volumes are supported and any other fields will be ignored. Please refer [here](https://github.com/GoogleCloudPlatform/batch-samples/tree/main/api-samples/image-streaming) for the feature requirements and limitations. "entrypoint": "A String", # Overrides the `ENTRYPOINT` specified in the container. "imageUri": "A String", # The URI to pull the container image from. "options": "A String", # Arbitrary additional options to include in the "docker run" command when running this container, e.g. "--network host". @@ -1026,8 +1023,6 @@

Method Details

"a_key": "A String", }, "logsPolicy": { # LogsPolicy describes how outputs from a Job's Tasks (stdout/stderr) will be preserved. # Log preservation policy for the Job. - "cloudLoggingOption": { # CloudLoggingOption contains additional settings for cloud logging generated by Batch job. # Optional. Additional settings for Cloud Logging. It will only take effect when the destination of LogsPolicy is set to CLOUD_LOGGING. - }, "destination": "A String", # Where logs should be saved. "logsPath": "A String", # The path to which logs are saved when the destination = PATH. This can be a local file path on the VM, or under the mount point of a Persistent Disk or Filestore, or a Cloud Storage path. }, @@ -1147,6 +1142,7 @@

Method Details

"commands": [ # Overrides the `CMD` specified in the container. If there is an ENTRYPOINT (either in the container image or with the entrypoint field below) then commands are appended as arguments to the ENTRYPOINT. "A String", ], + "enableImageStreaming": True or False, # Optional. If set to true, container will run with Image streaming. The container runtime will be changed to containerd instead of docker. Currently, only imageUri, commands, entrypoint and volumes are supported and any other fields will be ignored. Please refer [here](https://github.com/GoogleCloudPlatform/batch-samples/tree/main/api-samples/image-streaming) for the feature requirements and limitations. "entrypoint": "A String", # Overrides the `ENTRYPOINT` specified in the container. "imageUri": "A String", # The URI to pull the container image from. "options": "A String", # Arbitrary additional options to include in the "docker run" command when running this container, e.g. "--network host". diff --git a/docs/dyn/batch_v1.projects.locations.state.html b/docs/dyn/batch_v1.projects.locations.state.html index 448b48fc6ca..cb58c99ec92 100644 --- a/docs/dyn/batch_v1.projects.locations.state.html +++ b/docs/dyn/batch_v1.projects.locations.state.html @@ -256,6 +256,7 @@

Method Details

"commands": [ # Overrides the `CMD` specified in the container. If there is an ENTRYPOINT (either in the container image or with the entrypoint field below) then commands are appended as arguments to the ENTRYPOINT. "A String", ], + "enableImageStreaming": True or False, # Optional. If set to true, container will run with Image streaming. The container runtime will be changed to containerd instead of docker. Currently, only imageUri, commands, entrypoint and volumes are supported and any other fields will be ignored. Please refer [here](https://github.com/GoogleCloudPlatform/batch-samples/tree/main/api-samples/image-streaming) for the feature requirements and limitations. "entrypoint": "A String", # Overrides the `ENTRYPOINT` specified in the container. "imageUri": "A String", # The URI to pull the container image from. "options": "A String", # Arbitrary additional options to include in the "docker run" command when running this container, e.g. "--network host". diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html index 608a360819a..e9dbd9bf7ea 100644 --- a/docs/dyn/bigquery_v2.models.html +++ b/docs/dyn/bigquery_v2.models.html @@ -801,6 +801,7 @@

Method Details

"maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. If unset, the number of rows in each batch is set dynamically. "remoteModelVersion": "A String", # Output only. The model version for LLM. "remoteServiceType": "A String", # Output only. The remote service type for remote model. + "speechRecognizer": "A String", # Output only. The name of the speech recognizer to use for speech recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. Customers can specify this field at model creation. If not specified, a default recognizer `projects/{model project}/locations/global/recognizers/_` will be used. See more details at [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers) }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. @@ -1821,6 +1822,7 @@

Method Details

"maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. If unset, the number of rows in each batch is set dynamically. "remoteModelVersion": "A String", # Output only. The model version for LLM. "remoteServiceType": "A String", # Output only. The remote service type for remote model. + "speechRecognizer": "A String", # Output only. The name of the speech recognizer to use for speech recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. Customers can specify this field at model creation. If not specified, a default recognizer `projects/{model project}/locations/global/recognizers/_` will be used. See more details at [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers) }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. @@ -2854,6 +2856,7 @@

Method Details

"maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. If unset, the number of rows in each batch is set dynamically. "remoteModelVersion": "A String", # Output only. The model version for LLM. "remoteServiceType": "A String", # Output only. The remote service type for remote model. + "speechRecognizer": "A String", # Output only. The name of the speech recognizer to use for speech recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. Customers can specify this field at model creation. If not specified, a default recognizer `projects/{model project}/locations/global/recognizers/_` will be used. See more details at [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers) }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. @@ -3862,6 +3865,7 @@

Method Details

"maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. If unset, the number of rows in each batch is set dynamically. "remoteModelVersion": "A String", # Output only. The model version for LLM. "remoteServiceType": "A String", # Output only. The remote service type for remote model. + "speechRecognizer": "A String", # Output only. The name of the speech recognizer to use for speech recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. Customers can specify this field at model creation. If not specified, a default recognizer `projects/{model project}/locations/global/recognizers/_` will be used. See more details at [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers) }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. diff --git a/docs/dyn/cloudbilling_v1.billingAccounts.html b/docs/dyn/cloudbilling_v1.billingAccounts.html index 59f2abbda34..cc749a049f8 100644 --- a/docs/dyn/cloudbilling_v1.billingAccounts.html +++ b/docs/dyn/cloudbilling_v1.billingAccounts.html @@ -133,10 +133,10 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` } - parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` + parent: string, Optional. The parent to create a billing account from. 
Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -150,7 +150,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }
@@ -173,7 +173,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }
@@ -230,10 +230,10 @@

Method Details

Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
 
 Args:
-  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported.
+  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (for example, `master_billing_account=billingAccounts/012345-678901-ABCDEF`). Boolean algebra and other fields are not currently supported.
   pageSize: integer, Requested page size. The maximum page size is 100; this is also the default.
   pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.
-  parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`
+  parent: string, Optional. The parent resource to list billing accounts from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -249,7 +249,7 @@ 

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }, ], "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve. @@ -296,7 +296,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }
@@ -314,7 +314,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` } updateMask: string, The update mask applied to the resource. Only "display_name" is currently supported. @@ -331,7 +331,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` } diff --git a/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html index eb7f294971d..54312d83c88 100644 --- a/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html +++ b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html @@ -97,7 +97,7 @@

Method Details

This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
 
 Args:
-  parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
+  parent: string, Optional. The parent to create a billing account from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -106,7 +106,7 @@ 

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` } x__xgafv: string, V1 error format. @@ -122,7 +122,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }
@@ -131,8 +131,8 @@

Method Details

Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
 
 Args:
-  parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
-  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported.
+  parent: string, Optional. The parent resource to list billing accounts from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` (required)
+  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (for example, `master_billing_account=billingAccounts/012345-678901-ABCDEF`). Boolean algebra and other fields are not currently supported.
   pageSize: integer, Requested page size. The maximum page size is 100; this is also the default.
   pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.
   x__xgafv: string, V1 error format.
@@ -150,7 +150,7 @@ 

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }, ], "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve. 
diff --git a/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html index 0ff45d23ac1..b79571c85e6 100644 --- a/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html +++ b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html @@ -100,7 +100,7 @@

Method Details

This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
 
 Args:
-  parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
+  parent: string, Optional. The parent to create a billing account from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -109,7 +109,7 @@ 

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` } x__xgafv: string, V1 error format. @@ -125,7 +125,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }
@@ -134,8 +134,8 @@

Method Details

Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
 
 Args:
-  parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
-  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported.
+  parent: string, Optional. The parent resource to list billing accounts from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` (required)
+  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (for example, `master_billing_account=billingAccounts/012345-678901-ABCDEF`). Boolean algebra and other fields are not currently supported.
   pageSize: integer, Requested page size. The maximum page size is 100; this is also the default.
   pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.
   x__xgafv: string, V1 error format.
@@ -153,7 +153,7 @@ 

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }, ], "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve. @@ -194,7 +194,7 @@

Method Details

"masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. - "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF` + "parent": "A String", # Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF` }
diff --git a/docs/dyn/cloudbilling_v1beta.billingAccounts.html b/docs/dyn/cloudbilling_v1beta.billingAccounts.html index 1433c971eb4..97ba9893545 100644 --- a/docs/dyn/cloudbilling_v1beta.billingAccounts.html +++ b/docs/dyn/cloudbilling_v1beta.billingAccounts.html @@ -133,9 +133,9 @@

Method Details

}, "workloads": [ # The Google Cloud usage whose costs are estimated. A maximum of 100 workloads can be provided. { # Specifies usage on a single Google Cloud product over a time frame. Each Google Cloud product has its own message, containing specific product configuration parameters of the product usage amounts along each dimension in which the product is billed. - "cloudCdnEgressWorkload": { # Specifies usage for Cloud CDN egress. # Usage on Google Cloud CDN Egress. - "cacheEgressDestination": "A String", # The destination for the cache egress charges. - "cacheEgressRate": { # An amount of usage over a time frame. # Cache egress usage. The rate of data cache egressed in the destination. For example : units such as "GiBy/s" or "TBy/mo". + "cloudCdnEgressWorkload": { # Specifies usage for Cloud CDN Data Transfer. # Usage on Google Cloud CDN Data Transfer. + "cacheEgressDestination": "A String", # The destination for the cache data transfer. + "cacheEgressRate": { # An amount of usage over a time frame. # Cache data transfer usage. The rate of data cache transferred to the destination. Use units such as GiB/s or TiB/mo. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. 
If you provide an incorrect unit for an instance, the correct unit is provided in the error message. The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -182,8 +182,8 @@

Method Details

}, }, }, - "cloudInterconnectEgressWorkload": { # The interconnect egress only includes the Interconnect Egress. Please use the standard egress traffic interface to specify your standard egress usage. # Usage on Google Cloud Interconnect Egress. - "egressRate": { # An amount of usage over a time frame. # Data egress usage. This usage applies when you move or copy data from one Google Cloud service to another service. Expected units such as "GiBy/s, By/s, etc." + "cloudInterconnectEgressWorkload": { # Includes the estimate for Interconnect Data Transfer only. To specify usage for data transfer between VMs and internet end-points, use the Standard Tier Internet Data Transfer interface. # Usage on Google Cloud Interconnect Data Transfer. + "egressRate": { # An amount of usage over a time frame. # Outbound data transfer usage. This usage applies when you move or copy data from one Google Cloud service to another service. The units are "GiB/s, B/s, and so on." "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -196,7 +196,7 @@

Method Details

], }, }, - "interconnectConnectionLocation": "A String", # Locations in the [Interconnect connection location table](https://cloud.google.com/vpc/network-pricing#interconnect-pricing). This is the interconnect egress charges. + "interconnectConnectionLocation": "A String", # Locations in the [Interconnect connection location table](https://cloud.google.com/vpc/network-pricing#interconnect-pricing). These are the Interconnect Data Transfer charges. }, "cloudInterconnectWorkload": { # Specifies usage for Cloud Interconnect resources. # Usage on Google Cloud Interconnect. "interconnectAttachments": [ # VLAN attachment used for interconnect. @@ -233,9 +233,9 @@

Method Details

}, }, }, - "cloudStorageEgressWorkload": { # Specification of a network type. Network egress within Google Cloud applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket.This includes the network egress within Google Cloud and the general network usage. # Usage on a cloud storage egress. + "cloudStorageEgressWorkload": { # Specification of a network type. Network data transfer within Google Cloud applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket.This includes the network data transfer within Google Cloud and the general network usage. # Usage on Cloud Storage Data Transfer. "destinationContinent": "A String", # Where the data is sent to. - "egressRate": { # An amount of usage over a time frame. # Egress usage rate. This usage applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket. Expected units such as "GiBy/s, By/s, ..." + "egressRate": { # An amount of usage over a time frame. # Data transfer usage rate. This usage applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket. Expected units such as "GiB/s, B/s, ..." "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. 
All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -382,9 +382,9 @@

Method Details

"region": "A String", # The [region](https://cloud.google.com/compute/docs/regions-zones) where the VMs run. For example: "us-central1". }, "name": "A String", # Required. A name for this workload. All workloads in a `CostScenario` must have a unique `name`. Each `name` may be at most 128 characters long. - "premiumTierEgressWorkload": { # Specify Premium Tier Internet egress networking. # Usage on Premium Tier Internet Egress. + "premiumTierEgressWorkload": { # Specify a Premium Tier Internet Data Transfer networking workload. # Usage on Premium Tier Internet Data Transfer. "destinationContinent": "A String", # Where the data is sent to. - "egressRate": { # An amount of usage over a time frame. # Premium Tier egress usage. Expected units such as "GiBy/s, By/s, etc." + "egressRate": { # An amount of usage over a time frame. # Premium Tier Data Transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -397,10 +397,10 @@

Method Details

], }, }, - "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from. + "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data comes from. }, - "standardTierEgressWorkload": { # Specify Standard Tier Internet egress networking. # Usage on Standard Tier Internet Egress. - "egressRate": { # An amount of usage over a time frame. # Standard tier egress usage. Expected units such as "GiBy/s, By/s, etc." + "standardTierEgressWorkload": { # Specify Standard Tier Internet Data Transfer. # Usage on Standard Tier Internet Data Transfer. + "egressRate": { # An amount of usage over a time frame. # Standard Tier Data Transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -413,12 +413,12 @@

Method Details

], }, }, - "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from. + "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred from. }, - "vmToVmEgressWorkload": { # Specify VM to VM egress. # Usage on Vm to Vm Egress. - "interRegionEgress": { # Egress traffic between two regions. - "destinationRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data goes to. - "egressRate": { # An amount of usage over a time frame. # VM to VM egress usage. Expected units such as "GiBy/s, By/s, etc." + "vmToVmEgressWorkload": { # Specify VM to VM data transfer. # Usage on VM to VM Data Transfer. + "interRegionEgress": { # Data transfer between two regions. + "destinationRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred to. + "egressRate": { # An amount of usage over a time frame. # VM to VM data transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. 
If you provide an incorrect unit for an instance, the correct unit is provided in the error message. The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -431,10 +431,10 @@

Method Details

], }, }, - "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from. + "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred from. }, - "intraRegionEgress": { # Egress traffic within the same region. When source region and destination region are in the same zone, using the internal IP addresses, there isn't any egress charge. - "egressRate": { # An amount of usage over a time frame. # VM to VM egress usage. Expected units such as "GiBy/s, By/s, etc." + "intraRegionEgress": { # Data transfer within the same region. When the source region and destination region are in the same zone, using internal IP addresses, there isn't any charge for data transfer. + "egressRate": { # An amount of usage over a time frame. # VM to VM data transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. diff --git a/docs/dyn/cloudbilling_v1beta.billingAccounts.skus.html b/docs/dyn/cloudbilling_v1beta.billingAccounts.skus.html index b5b2bfb6cac..8b12fc31e95 100644 --- a/docs/dyn/cloudbilling_v1beta.billingAccounts.skus.html +++ b/docs/dyn/cloudbilling_v1beta.billingAccounts.skus.html @@ -79,6 +79,11 @@

Instance Methods

Returns the price Resource.

+

+ prices() +

+

Returns the prices Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/cloudbilling_v1beta.billingAccounts.skus.prices.html b/docs/dyn/cloudbilling_v1beta.billingAccounts.skus.prices.html new file mode 100644 index 00000000000..846cb3231c3 --- /dev/null +++ b/docs/dyn/cloudbilling_v1beta.billingAccounts.skus.prices.html @@ -0,0 +1,197 @@ + + + +

Cloud Billing API . billingAccounts . skus . prices

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ list(parent, currencyCode=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists the latest prices for SKUs available to your Cloud Billing account.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ list(parent, currencyCode=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists the latest prices for SKUs available to your Cloud Billing account.
+
+Args:
+  parent: string, Required. To list all Billing Account SKUs, use `-` as the SKU ID. Format: `billingAccounts/{billing_account}/skus/-` Note: Specifying an actual SKU resource id will return a collection of one Billing Account Price. (required)
+  currencyCode: string, Optional. ISO-4217 currency code for the price. If not specified, the currency of the billing account will be used.
+  pageSize: integer, Optional. Maximum number of billing account prices to return. Results may return fewer than this value. Default value is 50 and maximum value is 5000.
+  pageToken: string, Optional. Page token received from a previous ListBillingAccountPrices call to retrieve the next page of results. If this field is empty, the first page is returned.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for ListBillingAccountPrices.
+  "billingAccountPrices": [ # The returned billing account prices.
+    { # Encapsulates the latest price for a billing account SKU.
+      "currencyCode": "A String", # ISO-4217 currency code for the price.
+      "name": "A String", # Resource name for the latest billing account price.
+      "priceReason": { # Encapsulates a price reason which contains background information about the origin of the price. # Background information on the origin of the price.
+        "defaultPrice": { # Encapsulates a default price which is the current list price. # Default price which is the current list price.
+        },
+        "fixedDiscount": { # Encapsulates a discount off the list price, anchored to the list price as of a fixed time. # Discount off the list price, anchored to the list price as of a fixed time.
+          "discountPercent": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's BigDecimal or Python's decimal.Decimal. [BigDecimal]: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html [decimal.Decimal]: https://docs.python.org/3/library/decimal.html # Percentage of the fixed discount.
+            "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' 
Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range.
+          },
+          "discountScopeType": "A String", # Type of the fixed discount scope which indicates the source of the discount. It can have values such as 'unspecified' and 'sku-group'.
+          "fixTime": "A String", # Time that the fixed discount is anchored to.
+          "skuGroup": "A String", # SKU group where the fixed discount comes from.
+        },
+        "fixedPrice": { # Encapsulates a set fixed price applicable during the terms of a contract agreement. # Fixed price applicable during the terms of a contract agreement.
+        },
+        "floatingDiscount": { # Encapsulates a discount off the current list price, not anchored to any list price as of a fixed time. # Discount off the current list price, not anchored to any list price as of a fixed time.
+          "discountPercent": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's BigDecimal or Python's decimal.Decimal. [BigDecimal]: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html [decimal.Decimal]: https://docs.python.org/3/library/decimal.html # Percentage of the floating discount.
+            "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' 
Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range.
+          },
+          "discountScopeType": "A String", # Type of the floating discount scope which indicates the source of the discount. It can have values such as 'unspecified' and 'sku-group'.
+          "skuGroup": "A String", # SKU group where the floating discount comes from.
+        },
+        "listPriceAsCeiling": { # Encapsulates a contract feature that the list price (DefaultPrice) will be used for the price if the current list price drops lower than the custom fixed price. Available to new contracts after March 21, 2022. Applies to all fixed price SKUs in the contract, including FixedPrice, FixedDiscount, MigratedPrice, and MergedPrice. # Contract feature that the list price (DefaultPrice) will be used for the price if the current list price drops lower than the custom fixed price. Available to new contracts after March 21, 2022. Applies to all fixed price SKUs in the contract, including FixedPrice, FixedDiscount, MigratedPrice, and MergedPrice.
+        },
+        "mergedPrice": { # Encapsulates a price after merging from multiple sources. With merged tiers, each individual tier can be from a different source with different discount types. # Price after merging from multiple sources.
+        },
+        "migratedPrice": { # Encapsulates a price migrated from other SKUs. # Price migrated from other SKUs.
+          "sourceSku": "A String", # Source SKU where the discount is migrated from. Format: billingAccounts/{billing_account}/skus/{sku}
+        },
+        "type": "A String", # Type of the price reason. It can have values such as 'unspecified', 'default-price', 'fixed-price', 'fixed-discount', 'floating-discount', 'migrated-price', 'merged-price', 'list-price-as-ceiling'.
+      },
+      "rate": { # Encapsulates a `Rate` price. Billing account SKUs with `Rate` price are offered by pricing tiers. The price has 1 or more rate pricing tiers. # Rate price metadata. Billing account SKUs with `Rate` price are offered by pricing tiers. The price can have 1 or more rate pricing tiers.
+        "aggregationInfo": { # Encapsulates the aggregation information such as aggregation level and interval for a billing account price. # Aggregation info for tiers such as aggregation level and interval.
+          "interval": "A String", # Interval at which usage is aggregated to compute cost. Example: "MONTHLY" interval indicates that usage is aggregated every month.
+          "level": "A String", # Level at which usage is aggregated to compute cost. Example: "ACCOUNT" level indicates that usage is aggregated across all projects in a single account.
+        },
+        "tiers": [ # All tiers associated with the `Rate` price.
+          { # Encapsulates a rate price tier.
+            "contractPrice": { # Represents an amount of money with its currency type. # Negotiated contract price specific for a billing account.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+            "effectiveDiscountPercent": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's BigDecimal or Python's decimal.Decimal. [BigDecimal]: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html [decimal.Decimal]: https://docs.python.org/3/library/decimal.html # Percentage of effective discount calculated using the current list price per pricing tier. Formula used: effective_discount_percent = (list_price - contract_price) / list_price × 100 If list_price and contract_price are zero, this field is the same as `discount_percent` of FixedDiscount and FloatingDiscount. If your contract does NOT have the feature LIST_PRICE_AS_CEILING enabled, the effective_discount_percent can be negative if the SKU has a FixedDiscount and the current list price is lower than the list price on the date of the contract agreement. See the `FixedDiscount.fix_time` on when the discount was set. If you have questions regarding pricing per SKU, contact your Account team for more details.
+              "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' 
Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range.
+            },
+            "listPrice": { # Represents an amount of money with its currency type. # List price of one tier.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+            "startAmount": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's BigDecimal or Python's decimal.Decimal. [BigDecimal]: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html [decimal.Decimal]: https://docs.python.org/3/library/decimal.html # Lower bound amount for a tier. Tiers 0-100, 100-200 will be represented with two tiers with `start_amount` 0 and 100.
+              "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' 
Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range.
+            },
+          },
+        ],
+        "unitInfo": { # Encapsulates the unit information for a Rate. # Unit info such as name and quantity.
+          "unit": "A String", # Shorthand for the unit. Example: GiBy.mo.
+          "unitDescription": "A String", # Human-readable description of the unit. Example: gibibyte month.
+          "unitQuantity": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's BigDecimal or Python's decimal.Decimal. [BigDecimal]: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html [decimal.Decimal]: https://docs.python.org/3/library/decimal.html # Unit quantity for the tier. Example: if the RateTier price is $1 per 1000000 Bytes, then `unit_quantity` is set to 1000000.
+            "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' 
Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range.
+          },
+        },
+      },
+      "valueType": "A String", # Type of the price. The possible values are: ["unspecified", "rate"].
+    },
+  ],
+  "nextPageToken": "A String", # Token that can be sent as `page_token` in the subsequent request to retrieve the next page. If this field is empty, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/cloudbilling_v1beta.skus.html b/docs/dyn/cloudbilling_v1beta.skus.html index a2058172320..2fb2b244a04 100644 --- a/docs/dyn/cloudbilling_v1beta.skus.html +++ b/docs/dyn/cloudbilling_v1beta.skus.html @@ -79,6 +79,11 @@

Instance Methods

Returns the price Resource.

+

+ prices() +

+

Returns the prices Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/cloudbilling_v1beta.skus.prices.html b/docs/dyn/cloudbilling_v1beta.skus.prices.html new file mode 100644 index 00000000000..af9d11726da --- /dev/null +++ b/docs/dyn/cloudbilling_v1beta.skus.prices.html @@ -0,0 +1,160 @@ + + + +

Cloud Billing API . skus . prices

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ list(parent, currencyCode=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists the latest prices for all SKUs.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ list(parent, currencyCode=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists the latest prices for all SKUs.
+
+Args:
+  parent: string, Required. To list the prices for all SKUs, use `-` as the SKU ID. Format: `skus/-` Specifying a specific SKU ID returns a collection with one Price object for the SKU. (required)
+  currencyCode: string, Optional. ISO-4217 currency code for the price. If not specified, USD will be used.
+  pageSize: integer, Optional. Maximum number of prices to return. Results may return fewer than this value. Default value is 50 and maximum value is 5000.
+  pageToken: string, Optional. Page token received from a previous ListPrices call to retrieve the next page of results. If this field is empty, the first page is returned.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for ListPrices.
+  "nextPageToken": "A String", # Token that can be sent as `page_token` in the subsequent request to retrieve the next page. If this field is empty, there are no subsequent pages.
+  "prices": [ # The returned publicly listed prices.
+    { # Encapsulates the latest price for a SKU.
+      "currencyCode": "A String", # ISO-4217 currency code for the price.
+      "name": "A String", # Resource name for the latest price.
+      "rate": { # Encapsulates a `Rate` price. SKUs with `Rate` price are offered by pricing tiers. The price has 1 or more rate pricing tiers. # Rate price metadata. SKUs with `Rate` price are offered by pricing tiers. The price can have 1 or more rate pricing tiers.
+        "aggregationInfo": { # Encapsulates the aggregation information such as aggregation level and interval for a price. # Aggregation info for tiers such as aggregation level and interval.
+          "interval": "A String", # Interval at which usage is aggregated to compute cost. Example: "MONTHLY" interval indicates that usage is aggregated every month.
+          "level": "A String", # Level at which usage is aggregated to compute cost. Example: "ACCOUNT" level indicates that usage is aggregated across all projects in a single account.
+        },
+        "tiers": [ # All tiers associated with the `Rate` price.
+          { # Encapsulates a rate price tier.
+            "listPrice": { # Represents an amount of money with its currency type. # List price of one tier.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+            "startAmount": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's BigDecimal or Python's decimal.Decimal. [BigDecimal]: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html [decimal.Decimal]: https://docs.python.org/3/library/decimal.html # Lower bound amount for a tier. Tiers 0-100, 100-200 will be represented with two tiers with `start_amount` 0 and 100.
+              "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' 
Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range.
+            },
+          },
+        ],
+        "unitInfo": { # Encapsulates the unit information for a Rate. # Unit info such as name and quantity.
+          "unit": "A String", # Shorthand for the unit. Example: GiBy.mo.
+          "unitDescription": "A String", # Human-readable description of the unit. Example: gibibyte month.
+          "unitQuantity": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's BigDecimal or Python's decimal.Decimal. [BigDecimal]: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html [decimal.Decimal]: https://docs.python.org/3/library/decimal.html # Unit quantity for the tier. Example: if the RateTier price is $1 per 1000000 Bytes, then `unit_quantity` is set to 1000000.
+            "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' 
Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range.
+          },
+        },
+      },
+      "valueType": "A String", # Type of the price. It can have values: ["unspecified", "rate"].
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/cloudbilling_v1beta.v1beta.html b/docs/dyn/cloudbilling_v1beta.v1beta.html index fc77fbc50fa..e1d4b0253da 100644 --- a/docs/dyn/cloudbilling_v1beta.v1beta.html +++ b/docs/dyn/cloudbilling_v1beta.v1beta.html @@ -117,9 +117,9 @@

Method Details

}, "workloads": [ # The Google Cloud usage whose costs are estimated. A maximum of 100 workloads can be provided. { # Specifies usage on a single Google Cloud product over a time frame. Each Google Cloud product has its own message, containing specific product configuration parameters of the product usage amounts along each dimension in which the product is billed. - "cloudCdnEgressWorkload": { # Specifies usage for Cloud CDN egress. # Usage on Google Cloud CDN Egress. - "cacheEgressDestination": "A String", # The destination for the cache egress charges. - "cacheEgressRate": { # An amount of usage over a time frame. # Cache egress usage. The rate of data cache egressed in the destination. For example : units such as "GiBy/s" or "TBy/mo". + "cloudCdnEgressWorkload": { # Specifies usage for Cloud CDN Data Transfer. # Usage on Google Cloud CDN Data Transfer. + "cacheEgressDestination": "A String", # The destination for the cache data transfer. + "cacheEgressRate": { # An amount of usage over a time frame. # Cache data transfer usage. The rate of data cache transferred to the destination. Use units such as GiB/s or TiB/mo. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. 
If you provide an incorrect unit for an instance, the correct unit is provided in the error message. The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -166,8 +166,8 @@

Method Details

}, }, }, - "cloudInterconnectEgressWorkload": { # The interconnect egress only includes the Interconnect Egress. Please use the standard egress traffic interface to specify your standard egress usage. # Usage on Google Cloud Interconnect Egress. - "egressRate": { # An amount of usage over a time frame. # Data egress usage. This usage applies when you move or copy data from one Google Cloud service to another service. Expected units such as "GiBy/s, By/s, etc." + "cloudInterconnectEgressWorkload": { # Includes the estimate for Interconnect Data Transfer only. To specify usage for data transfer between VMs and internet end-points, use the Standard Tier Internet Data Transfer interface. # Usage on Google Cloud Interconnect Data Transfer. + "egressRate": { # An amount of usage over a time frame. # Outbound data transfer usage. This usage applies when you move or copy data from one Google Cloud service to another service. The units are "GiB/s, B/s, and so on." "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -180,7 +180,7 @@

Method Details

], }, }, - "interconnectConnectionLocation": "A String", # Locations in the [Interconnect connection location table](https://cloud.google.com/vpc/network-pricing#interconnect-pricing). This is the interconnect egress charges. + "interconnectConnectionLocation": "A String", # Locations in the [Interconnect connection location table](https://cloud.google.com/vpc/network-pricing#interconnect-pricing). These are the Interconnect Data Transfer charges. }, "cloudInterconnectWorkload": { # Specifies usage for Cloud Interconnect resources. # Usage on Google Cloud Interconnect. "interconnectAttachments": [ # VLAN attachment used for interconnect. @@ -217,9 +217,9 @@

Method Details

}, }, }, - "cloudStorageEgressWorkload": { # Specification of a network type. Network egress within Google Cloud applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket.This includes the network egress within Google Cloud and the general network usage. # Usage on a cloud storage egress. + "cloudStorageEgressWorkload": { # Specification of a network type. Network data transfer within Google Cloud applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket. This includes the network data transfer within Google Cloud and the general network usage. # Usage on Cloud Storage Data Transfer. "destinationContinent": "A String", # Where the data is sent to. - "egressRate": { # An amount of usage over a time frame. # Egress usage rate. This usage applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket. Expected units such as "GiBy/s, By/s, ..." + "egressRate": { # An amount of usage over a time frame. # Data transfer usage rate. This usage applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket. Expected units such as "GiB/s, B/s, ..." "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. 
All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -366,9 +366,9 @@

Method Details

"region": "A String", # The [region](https://cloud.google.com/compute/docs/regions-zones) where the VMs run. For example: "us-central1". }, "name": "A String", # Required. A name for this workload. All workloads in a `CostScenario` must have a unique `name`. Each `name` may be at most 128 characters long. - "premiumTierEgressWorkload": { # Specify Premium Tier Internet egress networking. # Usage on Premium Tier Internet Egress. + "premiumTierEgressWorkload": { # Specify a Premium Tier Internet Data Transfer networking workload. # Usage on Premium Tier Internet Data Transfer. "destinationContinent": "A String", # Where the data is sent to. - "egressRate": { # An amount of usage over a time frame. # Premium Tier egress usage. Expected units such as "GiBy/s, By/s, etc." + "egressRate": { # An amount of usage over a time frame. # Premium Tier Data Transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -381,10 +381,10 @@

Method Details

], }, }, - "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from. + "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data comes from. }, - "standardTierEgressWorkload": { # Specify Standard Tier Internet egress networking. # Usage on Standard Tier Internet Egress. - "egressRate": { # An amount of usage over a time frame. # Standard tier egress usage. Expected units such as "GiBy/s, By/s, etc." + "standardTierEgressWorkload": { # Specify Standard Tier Internet Data Transfer. # Usage on Standard Tier Internet Data Transfer. + "egressRate": { # An amount of usage over a time frame. # Standard Tier Data Transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -397,12 +397,12 @@

Method Details

], }, }, - "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from. + "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred from. }, - "vmToVmEgressWorkload": { # Specify VM to VM egress. # Usage on Vm to Vm Egress. - "interRegionEgress": { # Egress traffic between two regions. - "destinationRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data goes to. - "egressRate": { # An amount of usage over a time frame. # VM to VM egress usage. Expected units such as "GiBy/s, By/s, etc." + "vmToVmEgressWorkload": { # Specify VM to VM data transfer. # Usage on VM to VM Data Transfer. + "interRegionEgress": { # Data transfer between two regions. + "destinationRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred to. + "egressRate": { # An amount of usage over a time frame. # VM to VM data transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. 
If you provide an incorrect unit for an instance, the correct unit is provided in the error message. The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. @@ -415,10 +415,10 @@

Method Details

], }, }, - "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from. + "sourceRegion": "A String", # Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred from. }, - "intraRegionEgress": { # Egress traffic within the same region. When source region and destination region are in the same zone, using the internal IP addresses, there isn't any egress charge. - "egressRate": { # An amount of usage over a time frame. # VM to VM egress usage. Expected units such as "GiBy/s, By/s, etc." + "intraRegionEgress": { # Data transfer within the same region. When the source region and destination region are in the same zone, using internal IP addresses, there isn't any charge for data transfer. + "egressRate": { # An amount of usage over a time frame. # VM to VM data transfer usage. The expected units are GiB/s, B/s, and so on. "usageRateTimeline": { # A timeline of usage rates. Consists of a series of entries, each of which specifies a constant rate of usage during a time interval. Each entry contains an effective time. The usage rate is in effect from that time until the effective time of the subsequent entry, or, for the last entry, for the remaining portion of estimation time frame. Effective times are specified as an offset into the estimation time frame. Usage is considered to be zero until the `effective_time` of the first entry. All subsequent entries must have an effective time greater than the previous entry and less than the estimate time frame. The effective time on all entries must be an integer number of hours. # A timeline of usage rates over the estimate interval. "unit": "A String", # The unit for the usage rate in each timeline entry. If you provide an incorrect unit for an instance, the correct unit is provided in the error message. 
The supported units are a subset of [The Unified Code for Units of Measure](https://ucum.org/ucum.html) standard: * **Time units (TIME-UNIT)** * `s` second * `min` minute * `h` hour * `d` day * `wk` week * `mo` month * `yr` year * `ms` millisecond * `us` microsecond * `ns` nanosecond * **Basic storage units (BASIC-STORAGE-UNIT)** * `bit` bit * `By` byte * **Count units (COUNT-UNIT)** * `count` count * **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For example: `kBy/{email}` or `MiBy/10ms`. * `.` multiplication or composition (as an infix operator). For example: `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: ``` Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; UNIT = TIME-UNIT | STORAGE-UNIT | DATA-UNIT | COUNT-UNIT Annotation = "{" NAME "}" ; ``` Examples: * Request per second: `1/s` or `{requests}/s` * GibiBytes: `GiBy` * GibiBytes * seconds: `GiBy.s` "usageRateTimelineEntries": [ # The timeline entries. Each entry has a start time and usage rate. The start time specifies the effective time of the usage rate. The entries must be sorted by start time in an increasing order. diff --git a/docs/dyn/clouddeploy_v1.projects.locations.customTargetTypes.html b/docs/dyn/clouddeploy_v1.projects.locations.customTargetTypes.html new file mode 100644 index 00000000000..e7ad1fa2596 --- /dev/null +++ b/docs/dyn/clouddeploy_v1.projects.locations.customTargetTypes.html @@ -0,0 +1,426 @@ + + + +

Cloud Deploy API . projects . locations . customTargetTypes

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, customTargetTypeId=None, requestId=None, validateOnly=None, x__xgafv=None)

+

Creates a new CustomTargetType in a given project and location.

+

+ delete(name, allowMissing=None, etag=None, requestId=None, validateOnly=None, x__xgafv=None)

+

Deletes a single CustomTargetType.

+

+ get(name, x__xgafv=None)

+

Gets details of a single CustomTargetType.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists CustomTargetTypes in a given project and location.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, allowMissing=None, body=None, requestId=None, updateMask=None, validateOnly=None, x__xgafv=None)

+

Updates a single CustomTargetType.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, customTargetTypeId=None, requestId=None, validateOnly=None, x__xgafv=None) +
Creates a new CustomTargetType in a given project and location.
+
+Args:
+  parent: string, Required. The parent collection in which the `CustomTargetType` should be created in. Format should be `projects/{project_id}/locations/{location_name}`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy.
+  "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+    "a_key": "A String",
+  },
+  "createTime": "A String", # Output only. Time at which the `CustomTargetType` was created.
+  "customActions": { # CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions. # Configures render and deploy for the `CustomTargetType` using Skaffold custom actions.
+    "deployAction": "A String", # Required. The Skaffold custom action responsible for deploy operations.
+    "includeSkaffoldModules": [ # Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose.
+      { # Skaffold Config modules and their remote source.
+        "configs": [ # Optional. The Skaffold Config modules to use from the specified source.
+          "A String",
+        ],
+        "git": { # Git repository containing Skaffold Config modules. # Remote git repository containing the Skaffold Config modules.
+          "path": "A String", # Optional. Relative path from the repository root to the Skaffold file.
+          "ref": "A String", # Optional. Git ref the package should be cloned from.
+          "repo": "A String", # Required. Git repository the package should be cloned from.
+        },
+        "googleCloudStorage": { # Cloud Storage bucket containing Skaffold Config modules. # Cloud Storage bucket containing the Skaffold Config modules.
+          "path": "A String", # Optional. Relative path from the source to the Skaffold file.
+          "source": "A String", # Required. Cloud Storage source paths to copy recursively. For example, providing "gs://my-bucket/dir/configs/*" will result in Skaffold copying all files within the "dir/configs" directory in the bucket "my-bucket".
+        },
+      },
+    ],
+    "renderAction": "A String", # Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`.
+  },
+  "customTargetTypeId": "A String", # Output only. Resource id of the `CustomTargetType`.
+  "description": "A String", # Optional. Description of the `CustomTargetType`. Max length is 255 characters.
+  "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
+    "a_key": "A String",
+  },
+  "name": "A String", # Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`.
+  "uid": "A String", # Output only. Unique identifier of the `CustomTargetType`.
+  "updateTime": "A String", # Output only. Most recent time at which the `CustomTargetType` was updated.
+}
+
+  customTargetTypeId: string, Required. ID of the `CustomTargetType`.
+  requestId: string, Optional. A request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  validateOnly: boolean, Optional. If set to true, the request is validated and the user is provided with an expected result, but no actual change is made.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ delete(name, allowMissing=None, etag=None, requestId=None, validateOnly=None, x__xgafv=None) +
Deletes a single CustomTargetType.
+
+Args:
+  name: string, Required. The name of the `CustomTargetType` to delete. Format must be `projects/{project_id}/locations/{location_name}/customTargetTypes/{custom_target_type}`. (required)
+  allowMissing: boolean, Optional. If set to true, then deleting an already deleted or non-existing `CustomTargetType` will succeed.
+  etag: string, Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  requestId: string, Optional. A request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if the original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  validateOnly: boolean, Optional. If set to true, the request is validated but no actual change is made.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets details of a single CustomTargetType.
+
+Args:
+  name: string, Required. Name of the `CustomTargetType`. Format must be `projects/{project_id}/locations/{location_name}/customTargetTypes/{custom_target_type}`. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy.
+  "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+    "a_key": "A String",
+  },
+  "createTime": "A String", # Output only. Time at which the `CustomTargetType` was created.
+  "customActions": { # CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions. # Configures render and deploy for the `CustomTargetType` using Skaffold custom actions.
+    "deployAction": "A String", # Required. The Skaffold custom action responsible for deploy operations.
+    "includeSkaffoldModules": [ # Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose.
+      { # Skaffold Config modules and their remote source.
+        "configs": [ # Optional. The Skaffold Config modules to use from the specified source.
+          "A String",
+        ],
+        "git": { # Git repository containing Skaffold Config modules. # Remote git repository containing the Skaffold Config modules.
+          "path": "A String", # Optional. Relative path from the repository root to the Skaffold file.
+          "ref": "A String", # Optional. Git ref the package should be cloned from.
+          "repo": "A String", # Required. Git repository the package should be cloned from.
+        },
+        "googleCloudStorage": { # Cloud Storage bucket containing Skaffold Config modules. # Cloud Storage bucket containing the Skaffold Config modules.
+          "path": "A String", # Optional. Relative path from the source to the Skaffold file.
+          "source": "A String", # Required. Cloud Storage source paths to copy recursively. For example, providing "gs://my-bucket/dir/configs/*" will result in Skaffold copying all files within the "dir/configs" directory in the bucket "my-bucket".
+        },
+      },
+    ],
+    "renderAction": "A String", # Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`.
+  },
+  "customTargetTypeId": "A String", # Output only. Resource id of the `CustomTargetType`.
+  "description": "A String", # Optional. Description of the `CustomTargetType`. Max length is 255 characters.
+  "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
+    "a_key": "A String",
+  },
+  "name": "A String", # Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`.
+  "uid": "A String", # Output only. Unique identifier of the `CustomTargetType`.
+  "updateTime": "A String", # Output only. Most recent time at which the `CustomTargetType` was updated.
+}
+
+ +
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists CustomTargetTypes in a given project and location.
+
+Args:
+  parent: string, Required. The parent that owns this collection of custom target types. Format must be `projects/{project_id}/locations/{location_name}`. (required)
+  filter: string, Optional. Filter custom target types to be returned. See https://google.aip.dev/160 for more details.
+  orderBy: string, Optional. Field to sort by. See https://google.aip.dev/132#ordering for more details.
+  pageSize: integer, Optional. The maximum number of `CustomTargetType` objects to return. The service may return fewer than this value. If unspecified, at most 50 `CustomTargetType` objects will be returned. The maximum value is 1000; values above 1000 will be set to 1000.
+  pageToken: string, Optional. A page token, received from a previous `ListCustomTargetTypes` call. Provide this to retrieve the subsequent page. When paginating, all other provided parameters match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response object from `ListCustomTargetTypes`.
+  "customTargetTypes": [ # The `CustomTargetType` objects.
+    { # A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy.
+      "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+        "a_key": "A String",
+      },
+      "createTime": "A String", # Output only. Time at which the `CustomTargetType` was created.
+      "customActions": { # CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions. # Configures render and deploy for the `CustomTargetType` using Skaffold custom actions.
+        "deployAction": "A String", # Required. The Skaffold custom action responsible for deploy operations.
+        "includeSkaffoldModules": [ # Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose.
+          { # Skaffold Config modules and their remote source.
+            "configs": [ # Optional. The Skaffold Config modules to use from the specified source.
+              "A String",
+            ],
+            "git": { # Git repository containing Skaffold Config modules. # Remote git repository containing the Skaffold Config modules.
+              "path": "A String", # Optional. Relative path from the repository root to the Skaffold file.
+              "ref": "A String", # Optional. Git ref the package should be cloned from.
+              "repo": "A String", # Required. Git repository the package should be cloned from.
+            },
+            "googleCloudStorage": { # Cloud Storage bucket containing Skaffold Config modules. # Cloud Storage bucket containing the Skaffold Config modules.
+              "path": "A String", # Optional. Relative path from the source to the Skaffold file.
+              "source": "A String", # Required. Cloud Storage source paths to copy recursively. For example, providing "gs://my-bucket/dir/configs/*" will result in Skaffold copying all files within the "dir/configs" directory in the bucket "my-bucket".
+            },
+          },
+        ],
+        "renderAction": "A String", # Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`.
+      },
+      "customTargetTypeId": "A String", # Output only. Resource id of the `CustomTargetType`.
+      "description": "A String", # Optional. Description of the `CustomTargetType`. Max length is 255 characters.
+      "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+      "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
+        "a_key": "A String",
+      },
+      "name": "A String", # Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`.
+      "uid": "A String", # Output only. Unique identifier of the `CustomTargetType`.
+      "updateTime": "A String", # Output only. Most recent time at which the `CustomTargetType` was updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "unreachable": [ # Locations that could not be reached.
+    "A String",
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ patch(name, allowMissing=None, body=None, requestId=None, updateMask=None, validateOnly=None, x__xgafv=None) +
Updates a single CustomTargetType.
+
+Args:
+  name: string, Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy.
+  "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+    "a_key": "A String",
+  },
+  "createTime": "A String", # Output only. Time at which the `CustomTargetType` was created.
+  "customActions": { # CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions. # Configures render and deploy for the `CustomTargetType` using Skaffold custom actions.
+    "deployAction": "A String", # Required. The Skaffold custom action responsible for deploy operations.
+    "includeSkaffoldModules": [ # Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose.
+      { # Skaffold Config modules and their remote source.
+        "configs": [ # Optional. The Skaffold Config modules to use from the specified source.
+          "A String",
+        ],
+        "git": { # Git repository containing Skaffold Config modules. # Remote git repository containing the Skaffold Config modules.
+          "path": "A String", # Optional. Relative path from the repository root to the Skaffold file.
+          "ref": "A String", # Optional. Git ref the package should be cloned from.
+          "repo": "A String", # Required. Git repository the package should be cloned from.
+        },
+        "googleCloudStorage": { # Cloud Storage bucket containing Skaffold Config modules. # Cloud Storage bucket containing the Skaffold Config modules.
+          "path": "A String", # Optional. Relative path from the source to the Skaffold file.
+          "source": "A String", # Required. Cloud Storage source paths to copy recursively. For example, providing "gs://my-bucket/dir/configs/*" will result in Skaffold copying all files within the "dir/configs" directory in the bucket "my-bucket".
+        },
+      },
+    ],
+    "renderAction": "A String", # Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`.
+  },
+  "customTargetTypeId": "A String", # Output only. Resource id of the `CustomTargetType`.
+  "description": "A String", # Optional. Description of the `CustomTargetType`. Max length is 255 characters.
+  "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
+    "a_key": "A String",
+  },
+  "name": "A String", # Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`.
+  "uid": "A String", # Output only. Unique identifier of the `CustomTargetType`.
+  "updateTime": "A String", # Output only. Most recent time at which the `CustomTargetType` was updated.
+}
+
+  allowMissing: boolean, Optional. If set to true, updating a `CustomTargetType` that does not exist will result in the creation of a new `CustomTargetType`.
+  requestId: string, Optional. A request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if the original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  updateMask: string, Required. Field mask is used to specify the fields to be overwritten in the `CustomTargetType` resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then all fields will be overwritten.
+  validateOnly: boolean, Optional. If set to true, the request is validated and the user is provided with an expected result, but no actual change is made.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html index 1c0578c7f78..fd9e556541f 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html @@ -133,15 +133,15 @@

Method Details

Returns: An object of the form: - { # An `AutomationRun` resource in the Cloud Deploy API. An `AutomationRun` represents an automation execution instance of an automation rule. + { # An `AutomationRun` resource in the Cloud Deploy API. An `AutomationRun` represents an execution instance of an automation rule. "advanceRolloutOperation": { # Contains the information of an automated advance-rollout operation. # Output only. Advances a rollout to the next phase. - "destinationPhase": "A String", # Output only. The phase to which the rollout will be advanced to. + "destinationPhase": "A String", # Output only. The phase the rollout will be advanced to. "rollout": "A String", # Output only. The name of the rollout that initiates the `AutomationRun`. "sourcePhase": "A String", # Output only. The phase of a deployment that initiated the operation. "wait": "A String", # Output only. How long the operation will be paused. }, "automationId": "A String", # Output only. The ID of the automation that initiated the operation. - "automationSnapshot": { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. # Output only. Snapshot of the Automation taken at AutomationRun creation time. + "automationSnapshot": { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion among Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. # Output only. Snapshot of the Automation taken at AutomationRun creation time. "annotations": { # Optional. User annotations. 
These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -156,7 +156,7 @@

Method Details

{ # `AutomationRule` defines the automation activities. "advanceRolloutRule": { # The `AdvanceRollout` automation rule will automatically advance a successful Rollout to the next phase. # Optional. The `AdvanceRolloutRule` will automatically advance a successful Rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -172,7 +172,7 @@

Method Details

}, "promoteReleaseRule": { # `PromoteRelease` rule will automatically promote a release from the current target to a specified target. # Optional. `PromoteReleaseRule` will automatically promote a release from the current target to a specified target. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -187,7 +187,7 @@

Method Details

}, "repairRolloutRule": { # The `RepairRolloutRule` automation rule will automatically repair a failed `Rollout`. # Optional. The `RepairRolloutRule` will automatically repair a failed rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the 'Automation' rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -202,7 +202,7 @@

Method Details

"repairModes": [ # Required. Defines the types of automatic repair actions for failed jobs. { # Configuration of the repair action. "retry": { # Retries the failed job. # Optional. Retries a failed job. - "attempts": "A String", # Required. Total number of retries. Retry will skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. }, @@ -234,7 +234,7 @@

Method Details

}, "createTime": "A String", # Output only. Time at which the `AutomationRun` was created. "etag": "A String", # Output only. The weak etag of the `AutomationRun` resource. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. - "expireTime": "A String", # Output only. Time the `AutomationRun` will expire. An `AutomationRun` will expire after 14 days from its creation date. + "expireTime": "A String", # Output only. Time the `AutomationRun` expires. An `AutomationRun` expires after 14 days from its creation date. "name": "A String", # Output only. Name of the `AutomationRun`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}/automationRuns/{automation_run}`. "promoteReleaseOperation": { # Contains the information of an automated promote-release operation. # Output only. Promotes a release to a specified 'Target'. "phase": "A String", # Output only. The starting phase of the rollout created by this operation. @@ -244,6 +244,8 @@

Method Details

}, "repairRolloutOperation": { # Contains the information for an automated `repair rollout` operation. # Output only. Repairs a failed 'Rollout'. "currentRepairModeIndex": "A String", # Output only. The index of the current repair action in the repair sequence. + "jobId": "A String", # Output only. The job ID for the Job to repair. + "phaseId": "A String", # Output only. The phase ID of the phase that includes the job being repaired. "repairPhases": [ # Output only. Records of the repair attempts. Each repair phase may have multiple retry attempts or single rollback attempt. { # RepairPhase tracks the repair attempts that have been made for each `RepairMode` specified in the `Automation` resource. "retry": { # RetryPhase contains the retry attempts and the metadata for initiating a new attempt. # Output only. Records of the retry attempts for retry repair mode. @@ -273,7 +275,7 @@

Method Details

"ruleId": "A String", # Output only. The ID of the automation rule that initiated the operation. "serviceAccount": "A String", # Output only. Email address of the user-managed IAM service account that performs the operations against Cloud Deploy resources. "state": "A String", # Output only. Current state of the `AutomationRun`. - "stateDescription": "A String", # Output only. Explains the current state of the `AutomationRun`. Present only an explanation is needed. + "stateDescription": "A String", # Output only. Explains the current state of the `AutomationRun`. Present only when an explanation is needed. "targetId": "A String", # Output only. The ID of the target that represents the promotion stage that initiates the `AutomationRun`. The value of this field is the last segment of a target name. "updateTime": "A String", # Output only. Time at which the automationRun was updated. "waitUntilTime": "A String", # Output only. Earliest time the `AutomationRun` will attempt to resume. Wait-time is configured by `wait` in automation rule. @@ -285,7 +287,7 @@

Method Details

Lists AutomationRuns in a given project and location.
 
 Args:
-  parent: string, Required. The parent, which owns this collection of automationRuns. Format must be `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}`. (required)
+  parent: string, Required. The parent `Delivery Pipeline`, which owns this collection of automationRuns. Format must be `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}`. (required)
   filter: string, Filter automationRuns to be returned. All fields can be used in the filter.
   orderBy: string, Field to sort by.
   pageSize: integer, The maximum number of automationRuns to return. The service may return fewer than this value. If unspecified, at most 50 automationRuns will be returned. The maximum value is 1000; values above 1000 will be set to 1000.
@@ -300,15 +302,15 @@ 

Method Details

{ # The response object from `ListAutomationRuns`. "automationRuns": [ # The `AutomationRuns` objects. - { # An `AutomationRun` resource in the Cloud Deploy API. An `AutomationRun` represents an automation execution instance of an automation rule. + { # An `AutomationRun` resource in the Cloud Deploy API. An `AutomationRun` represents an execution instance of an automation rule. "advanceRolloutOperation": { # Contains the information of an automated advance-rollout operation. # Output only. Advances a rollout to the next phase. - "destinationPhase": "A String", # Output only. The phase to which the rollout will be advanced to. + "destinationPhase": "A String", # Output only. The phase the rollout will be advanced to. "rollout": "A String", # Output only. The name of the rollout that initiates the `AutomationRun`. "sourcePhase": "A String", # Output only. The phase of a deployment that initiated the operation. "wait": "A String", # Output only. How long the operation will be paused. }, "automationId": "A String", # Output only. The ID of the automation that initiated the operation. - "automationSnapshot": { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. # Output only. Snapshot of the Automation taken at AutomationRun creation time. + "automationSnapshot": { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion among Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. # Output only. 
Snapshot of the Automation taken at AutomationRun creation time. "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -323,7 +325,7 @@

Method Details

{ # `AutomationRule` defines the automation activities. "advanceRolloutRule": { # The `AdvanceRollout` automation rule will automatically advance a successful Rollout to the next phase. # Optional. The `AdvanceRolloutRule` will automatically advance a successful Rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -339,7 +341,7 @@

Method Details

}, "promoteReleaseRule": { # `PromoteRelease` rule will automatically promote a release from the current target to a specified target. # Optional. `PromoteReleaseRule` will automatically promote a release from the current target to a specified target. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -354,7 +356,7 @@

Method Details

}, "repairRolloutRule": { # The `RepairRolloutRule` automation rule will automatically repair a failed `Rollout`. # Optional. The `RepairRolloutRule` will automatically repair a failed rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the 'Automation' rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -369,7 +371,7 @@

Method Details

"repairModes": [ # Required. Defines the types of automatic repair actions for failed jobs. { # Configuration of the repair action. "retry": { # Retries the failed job. # Optional. Retries a failed job. - "attempts": "A String", # Required. Total number of retries. Retry will skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. }, @@ -401,7 +403,7 @@

Method Details

}, "createTime": "A String", # Output only. Time at which the `AutomationRun` was created. "etag": "A String", # Output only. The weak etag of the `AutomationRun` resource. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. - "expireTime": "A String", # Output only. Time the `AutomationRun` will expire. An `AutomationRun` will expire after 14 days from its creation date. + "expireTime": "A String", # Output only. Time the `AutomationRun` expires. An `AutomationRun` expires after 14 days from its creation date. "name": "A String", # Output only. Name of the `AutomationRun`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}/automationRuns/{automation_run}`. "promoteReleaseOperation": { # Contains the information of an automated promote-release operation. # Output only. Promotes a release to a specified 'Target'. "phase": "A String", # Output only. The starting phase of the rollout created by this operation. @@ -411,6 +413,8 @@

Method Details

}, "repairRolloutOperation": { # Contains the information for an automated `repair rollout` operation. # Output only. Repairs a failed 'Rollout'. "currentRepairModeIndex": "A String", # Output only. The index of the current repair action in the repair sequence. + "jobId": "A String", # Output only. The job ID for the Job to repair. + "phaseId": "A String", # Output only. The phase ID of the phase that includes the job being repaired. "repairPhases": [ # Output only. Records of the repair attempts. Each repair phase may have multiple retry attempts or single rollback attempt. { # RepairPhase tracks the repair attempts that have been made for each `RepairMode` specified in the `Automation` resource. "retry": { # RetryPhase contains the retry attempts and the metadata for initiating a new attempt. # Output only. Records of the retry attempts for retry repair mode. @@ -440,7 +444,7 @@

Method Details

"ruleId": "A String", # Output only. The ID of the automation rule that initiated the operation. "serviceAccount": "A String", # Output only. Email address of the user-managed IAM service account that performs the operations against Cloud Deploy resources. "state": "A String", # Output only. Current state of the `AutomationRun`. - "stateDescription": "A String", # Output only. Explains the current state of the `AutomationRun`. Present only an explanation is needed. + "stateDescription": "A String", # Output only. Explains the current state of the `AutomationRun`. Present only when an explanation is needed. "targetId": "A String", # Output only. The ID of the target that represents the promotion stage that initiates the `AutomationRun`. The value of this field is the last segment of a target name. "updateTime": "A String", # Output only. Time at which the automationRun was updated. "waitUntilTime": "A String", # Output only. Earliest time the `AutomationRun` will attempt to resume. Wait-time is configured by `wait` in automation rule. diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html index 013c3164b0b..149a263ab1a 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html @@ -110,7 +110,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. +{ # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion among Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -125,7 +125,7 @@

Method Details

{ # `AutomationRule` defines the automation activities. "advanceRolloutRule": { # The `AdvanceRollout` automation rule will automatically advance a successful Rollout to the next phase. # Optional. The `AdvanceRolloutRule` will automatically advance a successful Rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -141,7 +141,7 @@

Method Details

}, "promoteReleaseRule": { # `PromoteRelease` rule will automatically promote a release from the current target to a specified target. # Optional. `PromoteReleaseRule` will automatically promote a release from the current target to a specified target. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -156,7 +156,7 @@

Method Details

}, "repairRolloutRule": { # The `RepairRolloutRule` automation rule will automatically repair a failed `Rollout`. # Optional. The `RepairRolloutRule` will automatically repair a failed rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the 'Automation' rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -171,7 +171,7 @@

Method Details

"repairModes": [ # Required. Defines the types of automatic repair actions for failed jobs. { # Configuration of the repair action. "retry": { # Retries the failed job. # Optional. Retries a failed job. - "attempts": "A String", # Required. Total number of retries. Retry will skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. }, @@ -287,7 +287,7 @@

Method Details

Returns: An object of the form: - { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. + { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion among Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -302,7 +302,7 @@

Method Details

{ # `AutomationRule` defines the automation activities. "advanceRolloutRule": { # The `AdvanceRollout` automation rule will automatically advance a successful Rollout to the next phase. # Optional. The `AdvanceRolloutRule` will automatically advance a successful Rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -318,7 +318,7 @@

Method Details

}, "promoteReleaseRule": { # `PromoteRelease` rule will automatically promote a release from the current target to a specified target. # Optional. `PromoteReleaseRule` will automatically promote a release from the current target to a specified target. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -333,7 +333,7 @@

Method Details

}, "repairRolloutRule": { # The `RepairRolloutRule` automation rule will automatically repair a failed `Rollout`. # Optional. The `RepairRolloutRule` will automatically repair a failed rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the 'Automation' rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -348,7 +348,7 @@

Method Details

"repairModes": [ # Required. Defines the types of automatic repair actions for failed jobs. { # Configuration of the repair action. "retry": { # Retries the failed job. # Optional. Retries a failed job. - "attempts": "A String", # Required. Total number of retries. Retry will skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. }, @@ -385,7 +385,7 @@

Method Details

Lists Automations in a given project and location.
 
 Args:
-  parent: string, Required. The parent, which owns this collection of automations. Format must be `projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}`. (required)
+  parent: string, Required. The parent `Delivery Pipeline`, which owns this collection of automations. Format must be `projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}`. (required)
   filter: string, Filter automations to be returned. All fields can be used in the filter.
   orderBy: string, Field to sort by.
   pageSize: integer, The maximum number of automations to return. The service may return fewer than this value. If unspecified, at most 50 automations will be returned. The maximum value is 1000; values above 1000 will be set to 1000.
@@ -399,8 +399,8 @@ 

Method Details

An object of the form: { # The response object from `ListAutomations`. - "automations": [ # The `Automations` objects. - { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. + "automations": [ # The `Automation` objects. + { # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion among Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -415,7 +415,7 @@

Method Details

{ # `AutomationRule` defines the automation activities. "advanceRolloutRule": { # The `AdvanceRollout` automation rule will automatically advance a successful Rollout to the next phase. # Optional. The `AdvanceRolloutRule` will automatically advance a successful Rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -431,7 +431,7 @@

Method Details

}, "promoteReleaseRule": { # `PromoteRelease` rule will automatically promote a release from the current target to a specified target. # Optional. `PromoteReleaseRule` will automatically promote a release from the current target to a specified target. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -446,7 +446,7 @@

Method Details

}, "repairRolloutRule": { # The `RepairRolloutRule` automation rule will automatically repair a failed `Rollout`. # Optional. The `RepairRolloutRule` will automatically repair a failed rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the 'Automation' rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -461,7 +461,7 @@

Method Details

"repairModes": [ # Required. Defines the types of automatic repair actions for failed jobs. { # Configuration of the repair action. "retry": { # Retries the failed job. # Optional. Retries a failed job. - "attempts": "A String", # Required. Total number of retries. Retry will skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. }, @@ -522,7 +522,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. +{ # An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion among Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process. "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -537,7 +537,7 @@

Method Details

{ # `AutomationRule` defines the automation activities. "advanceRolloutRule": { # The `AdvanceRollout` automation rule will automatically advance a successful Rollout to the next phase. # Optional. The `AdvanceRolloutRule` will automatically advance a successful Rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -553,7 +553,7 @@

Method Details

}, "promoteReleaseRule": { # `PromoteRelease` rule will automatically promote a release from the current target to a specified target. # Optional. `PromoteReleaseRule` will automatically promote a release from the current target to a specified target. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the Automation rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -568,7 +568,7 @@

Method Details

}, "repairRolloutRule": { # The `RepairRolloutRule` automation rule will automatically repair a failed `Rollout`. # Optional. The `RepairRolloutRule` will automatically repair a failed rollout. "condition": { # `AutomationRuleCondition` contains conditions relevant to an `Automation` rule. # Output only. Information around the state of the 'Automation' rule. - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Optional. Details around targets enumerated in the rule. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -583,7 +583,7 @@

Method Details

"repairModes": [ # Required. Defines the types of automatic repair actions for failed jobs. { # Configuration of the repair action. "retry": { # Retries the failed job. # Optional. Retries a failed job. - "attempts": "A String", # Required. Total number of retries. Retry will skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. }, diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html index 460d822c035..fce5aa7ca26 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html @@ -146,7 +146,7 @@

Method Details

"status": True or False, # True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. "updateTime": "A String", # Last time the condition was updated. }, - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -224,6 +224,15 @@

Method Details

"runtimeConfig": { # RuntimeConfig contains the runtime specific configurations for a deployment strategy. # Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. "cloudRun": { # CloudRunConfig contains the Cloud Run runtime configuration. # Cloud Run runtime configuration. "automaticTrafficControl": True or False, # Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + "canaryRevisionTags": [ # Optional. A list of tags that are added to the canary revision while the canary deployment is in progress. + "A String", + ], + "priorRevisionTags": [ # Optional. A list of tags that are added to the prior revision while the canary deployment is in progress. + "A String", + ], + "stableRevisionTags": [ # Optional. A list of tags that are added to the final stable revision after the canary deployment is completed. + "A String", + ], }, "kubernetes": { # KubernetesConfig contains the Kubernetes runtime configuration. # Kubernetes runtime configuration. "gatewayServiceMesh": { # Information about the Kubernetes Gateway API service mesh configuration. # Kubernetes Gateway API service mesh configuration. @@ -358,7 +367,7 @@

Method Details

"status": True or False, # True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. "updateTime": "A String", # Last time the condition was updated. }, - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -436,6 +445,15 @@

Method Details

"runtimeConfig": { # RuntimeConfig contains the runtime specific configurations for a deployment strategy. # Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. "cloudRun": { # CloudRunConfig contains the Cloud Run runtime configuration. # Cloud Run runtime configuration. "automaticTrafficControl": True or False, # Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + "canaryRevisionTags": [ # Optional. A list of tags that are added to the canary revision while the canary deployment is in progress. + "A String", + ], + "priorRevisionTags": [ # Optional. A list of tags that are added to the prior revision while the canary deployment is in progress. + "A String", + ], + "stableRevisionTags": [ # Optional. A list of tags that are added to the final stable revision after the canary deployment is completed. + "A String", + ], }, "kubernetes": { # KubernetesConfig contains the Kubernetes runtime configuration. # Kubernetes runtime configuration. "gatewayServiceMesh": { # Information about the Kubernetes Gateway API service mesh configuration. # Kubernetes Gateway API service mesh configuration. @@ -553,7 +571,7 @@

Method Details

"status": True or False, # True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. "updateTime": "A String", # Last time the condition was updated. }, - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -631,6 +649,15 @@

Method Details

"runtimeConfig": { # RuntimeConfig contains the runtime specific configurations for a deployment strategy. # Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. "cloudRun": { # CloudRunConfig contains the Cloud Run runtime configuration. # Cloud Run runtime configuration. "automaticTrafficControl": True or False, # Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + "canaryRevisionTags": [ # Optional. A list of tags that are added to the canary revision while the canary deployment is in progress. + "A String", + ], + "priorRevisionTags": [ # Optional. A list of tags that are added to the prior revision while the canary deployment is in progress. + "A String", + ], + "stableRevisionTags": [ # Optional. A list of tags that are added to the final stable revision after the canary deployment is completed. + "A String", + ], }, "kubernetes": { # KubernetesConfig contains the Kubernetes runtime configuration. # Kubernetes runtime configuration. "gatewayServiceMesh": { # Information about the Kubernetes Gateway API service mesh configuration. # Kubernetes Gateway API service mesh configuration. @@ -709,7 +736,7 @@

Method Details

"status": True or False, # True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. "updateTime": "A String", # Last time the condition was updated. }, - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -787,6 +814,15 @@

Method Details

"runtimeConfig": { # RuntimeConfig contains the runtime specific configurations for a deployment strategy. # Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. "cloudRun": { # CloudRunConfig contains the Cloud Run runtime configuration. # Cloud Run runtime configuration. "automaticTrafficControl": True or False, # Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + "canaryRevisionTags": [ # Optional. A list of tags that are added to the canary revision while the canary deployment is in progress. + "A String", + ], + "priorRevisionTags": [ # Optional. A list of tags that are added to the prior revision while the canary deployment is in progress. + "A String", + ], + "stableRevisionTags": [ # Optional. A list of tags that are added to the final stable revision after the canary deployment is completed. + "A String", + ], }, "kubernetes": { # KubernetesConfig contains the Kubernetes runtime configuration. # Kubernetes runtime configuration. "gatewayServiceMesh": { # Information about the Kubernetes Gateway API service mesh configuration. # Kubernetes Gateway API service mesh configuration. @@ -908,6 +944,11 @@

Method Details

"A String", ], }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined `Rollout` operations. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "name": "A String", # Optional. Name of the `Rollout`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{deliveryPipeline}/releases/{release}/rollouts/a-z{0,62}`. "phases": [ # Output only. The phases that represent the workflows of this `Rollout`. @@ -1132,6 +1173,11 @@

Method Details

"A String", ], }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined `Rollout` operations. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "name": "A String", # Optional. Name of the `Rollout`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{deliveryPipeline}/releases/{release}/rollouts/a-z{0,62}`. "phases": [ # Output only. The phases that represent the workflows of this `Rollout`. diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html index e348612e882..b2cbc870d96 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html @@ -151,14 +151,51 @@

Method Details

"releaseReadyCondition": { # ReleaseReadyCondition contains information around the status of the Release. If a release is not ready, you cannot create a rollout with the release. # Details around the Release's overall status. "status": True or False, # True if the Release is in a valid state. Otherwise at least one condition in `ReleaseCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Release. }, - "skaffoldSupportedCondition": { # SkaffoldSupportedCondition contains information about when support for the release's version of skaffold ends. # Details around the support state of the release's skaffold version. - "maintenanceModeTime": "A String", # The time at which this release's version of skaffold will enter maintenance mode. - "skaffoldSupportState": "A String", # The skaffold support state for this release's version of skaffold. - "status": True or False, # True if the version of skaffold used by this release is supported. - "supportExpirationTime": "A String", # The time at which this release's version of skaffold will no longer be supported. + "skaffoldSupportedCondition": { # SkaffoldSupportedCondition contains information about when support for the release's version of Skaffold ends. # Details around the support state of the release's Skaffold version. + "maintenanceModeTime": "A String", # The time at which this release's version of Skaffold will enter maintenance mode. + "skaffoldSupportState": "A String", # The Skaffold support state for this release's version of Skaffold. + "status": True or False, # True if the version of Skaffold used by this release is supported. + "supportExpirationTime": "A String", # The time at which this release's version of Skaffold will no longer be supported. }, }, "createTime": "A String", # Output only. Time at which the `Release` was created. + "customTargetTypeSnapshots": [ # Output only. 
Snapshot of the custom target types referenced by the targets taken at release creation time. + { # A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy. + "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + "a_key": "A String", + }, + "createTime": "A String", # Output only. Time at which the `CustomTargetType` was created. + "customActions": { # CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions. # Configures render and deploy for the `CustomTargetType` using Skaffold custom actions. + "deployAction": "A String", # Required. The Skaffold custom action responsible for deploy operations. + "includeSkaffoldModules": [ # Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose. + { # Skaffold Config modules and their remote source. + "configs": [ # Optional. The Skaffold Config modules to use from the specified source. + "A String", + ], + "git": { # Git repository containing Skaffold Config modules. # Remote git repository containing the Skaffold Config modules. + "path": "A String", # Optional. Relative path from the repository root to the Skaffold file. + "ref": "A String", # Optional. Git ref the package should be cloned from. + "repo": "A String", # Required. Git repository the package should be cloned from. + }, + "googleCloudStorage": { # Cloud Storage bucket containing Skaffold Config modules. # Cloud Storage bucket containing the Skaffold Config modules. + "path": "A String", # Optional. Relative path from the source to the Skaffold file. + "source": "A String", # Required. 
Cloud Storage source paths to copy recursively. For example, providing "gs://my-bucket/dir/configs/*" will result in Skaffold copying all files within the "dir/configs" directory in the bucket "my-bucket". + }, + }, + ], + "renderAction": "A String", # Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`. + }, + "customTargetTypeId": "A String", # Output only. Resource id of the `CustomTargetType`. + "description": "A String", # Optional. Description of the `CustomTargetType`. Max length is 255 characters. + "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. + "a_key": "A String", + }, + "name": "A String", # Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`. + "uid": "A String", # Output only. Unique identifier of the `CustomTargetType`. + "updateTime": "A String", # Output only. Most recent time at which the `CustomTargetType` was updated. + }, + ], "deliveryPipelineSnapshot": { # A `DeliveryPipeline` resource in the Cloud Deploy API. A `DeliveryPipeline` defines a pipeline through which a Skaffold configuration can progress. # Output only. 
Snapshot of the parent pipeline taken at release creation time. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. "a_key": "A String", @@ -168,7 +205,7 @@

Method Details

"status": True or False, # True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. "updateTime": "A String", # Last time the condition was updated. }, - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -246,6 +283,15 @@

Method Details

"runtimeConfig": { # RuntimeConfig contains the runtime specific configurations for a deployment strategy. # Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. "cloudRun": { # CloudRunConfig contains the Cloud Run runtime configuration. # Cloud Run runtime configuration. "automaticTrafficControl": True or False, # Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + "canaryRevisionTags": [ # Optional. A list of tags that are added to the canary revision while the canary deployment is in progress. + "A String", + ], + "priorRevisionTags": [ # Optional. A list of tags that are added to the prior revision while the canary deployment is in progress. + "A String", + ], + "stableRevisionTags": [ # Optional. A list of tags that are added to the final stable revision after the canary deployment is completed. + "A String", + ], }, "kubernetes": { # KubernetesConfig contains the Kubernetes runtime configuration. # Kubernetes runtime configuration. "gatewayServiceMesh": { # Information about the Kubernetes Gateway API service mesh configuration. # Kubernetes Gateway API service mesh configuration. @@ -321,6 +367,11 @@

Method Details

"cloudRun": { # CloudRunRenderMetadata contains Cloud Run information associated with a `Release` render. # Output only. Metadata associated with rendering for Cloud Run. "service": "A String", # Output only. The name of the Cloud Run Service in the rendered manifest. Format is `projects/{project}/locations/{location}/services/{service}`. }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined render operation. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "renderingBuild": "A String", # Output only. The resource name of the Cloud Build `Build` object that is used to render the manifest for this target. Format is `projects/{project}/locations/{location}/builds/{build}`. "renderingState": "A String", # Output only. Current state of the render operation for this Target. @@ -335,6 +386,9 @@

Method Details

"membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. + "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. + "customTargetType": "A String", # Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + }, "deployParameters": { # Optional. The deploy parameters to use for this target. "a_key": "A String", }, @@ -446,14 +500,51 @@

Method Details

"releaseReadyCondition": { # ReleaseReadyCondition contains information around the status of the Release. If a release is not ready, you cannot create a rollout with the release. # Details around the Releases's overall status. "status": True or False, # True if the Release is in a valid state. Otherwise at least one condition in `ReleaseCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Release. }, - "skaffoldSupportedCondition": { # SkaffoldSupportedCondition contains information about when support for the release's version of skaffold ends. # Details around the support state of the release's skaffold version. - "maintenanceModeTime": "A String", # The time at which this release's version of skaffold will enter maintenance mode. - "skaffoldSupportState": "A String", # The skaffold support state for this release's version of skaffold. - "status": True or False, # True if the version of skaffold used by this release is supported. - "supportExpirationTime": "A String", # The time at which this release's version of skaffold will no longer be supported. + "skaffoldSupportedCondition": { # SkaffoldSupportedCondition contains information about when support for the release's version of Skaffold ends. # Details around the support state of the release's Skaffold version. + "maintenanceModeTime": "A String", # The time at which this release's version of Skaffold will enter maintenance mode. + "skaffoldSupportState": "A String", # The Skaffold support state for this release's version of Skaffold. + "status": True or False, # True if the version of Skaffold used by this release is supported. + "supportExpirationTime": "A String", # The time at which this release's version of Skaffold will no longer be supported. }, }, "createTime": "A String", # Output only. Time at which the `Release` was created. + "customTargetTypeSnapshots": [ # Output only. 
Snapshot of the custom target types referenced by the targets taken at release creation time. + { # A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy. + "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + "a_key": "A String", + }, + "createTime": "A String", # Output only. Time at which the `CustomTargetType` was created. + "customActions": { # CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions. # Configures render and deploy for the `CustomTargetType` using Skaffold custom actions. + "deployAction": "A String", # Required. The Skaffold custom action responsible for deploy operations. + "includeSkaffoldModules": [ # Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose. + { # Skaffold Config modules and their remote source. + "configs": [ # Optional. The Skaffold Config modules to use from the specified source. + "A String", + ], + "git": { # Git repository containing Skaffold Config modules. # Remote git repository containing the Skaffold Config modules. + "path": "A String", # Optional. Relative path from the repository root to the Skaffold file. + "ref": "A String", # Optional. Git ref the package should be cloned from. + "repo": "A String", # Required. Git repository the package should be cloned from. + }, + "googleCloudStorage": { # Cloud Storage bucket containing Skaffold Config modules. # Cloud Storage bucket containing the Skaffold Config modules. + "path": "A String", # Optional. Relative path from the source to the Skaffold file. + "source": "A String", # Required. 
Cloud Storage source paths to copy recursively. For example, providing "gs://my-bucket/dir/configs/*" will result in Skaffold copying all files within the "dir/configs" directory in the bucket "my-bucket". + }, + }, + ], + "renderAction": "A String", # Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`. + }, + "customTargetTypeId": "A String", # Output only. Resource id of the `CustomTargetType`. + "description": "A String", # Optional. Description of the `CustomTargetType`. Max length is 255 characters. + "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. + "a_key": "A String", + }, + "name": "A String", # Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`. + "uid": "A String", # Output only. Unique identifier of the `CustomTargetType`. + "updateTime": "A String", # Output only. Most recent time at which the `CustomTargetType` was updated. + }, + ], "deliveryPipelineSnapshot": { # A `DeliveryPipeline` resource in the Cloud Deploy API. A `DeliveryPipeline` defines a pipeline through which a Skaffold configuration can progress. # Output only. 
Snapshot of the parent pipeline taken at release creation time. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. "a_key": "A String", @@ -463,7 +554,7 @@

Method Details

"status": True or False, # True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. "updateTime": "A String", # Last time the condition was updated. }, - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -541,6 +632,15 @@

Method Details

"runtimeConfig": { # RuntimeConfig contains the runtime specific configurations for a deployment strategy. # Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. "cloudRun": { # CloudRunConfig contains the Cloud Run runtime configuration. # Cloud Run runtime configuration. "automaticTrafficControl": True or False, # Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + "canaryRevisionTags": [ # Optional. A list of tags that are added to the canary revision while the canary deployment is in progress. + "A String", + ], + "priorRevisionTags": [ # Optional. A list of tags that are added to the prior revision while the canary deployment is in progress. + "A String", + ], + "stableRevisionTags": [ # Optional. A list of tags that are added to the final stable revision after the canary deployment is completed. + "A String", + ], }, "kubernetes": { # KubernetesConfig contains the Kubernetes runtime configuration. # Kubernetes runtime configuration. "gatewayServiceMesh": { # Information about the Kubernetes Gateway API service mesh configuration. # Kubernetes Gateway API service mesh configuration. @@ -616,6 +716,11 @@

Method Details

"cloudRun": { # CloudRunRenderMetadata contains Cloud Run information associated with a `Release` render. # Output only. Metadata associated with rendering for Cloud Run. "service": "A String", # Output only. The name of the Cloud Run Service in the rendered manifest. Format is `projects/{project}/locations/{location}/services/{service}`. }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined render operation. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "renderingBuild": "A String", # Output only. The resource name of the Cloud Build `Build` object that is used to render the manifest for this target. Format is `projects/{project}/locations/{location}/builds/{build}`. "renderingState": "A String", # Output only. Current state of the render operation for this Target. @@ -630,6 +735,9 @@

Method Details

"membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. + "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. + "customTargetType": "A String", # Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + }, "deployParameters": { # Optional. The deploy parameters to use for this target. "a_key": "A String", }, @@ -717,14 +825,51 @@

Method Details

"releaseReadyCondition": { # ReleaseReadyCondition contains information around the status of the Release. If a release is not ready, you cannot create a rollout with the release. # Details around the Releases's overall status. "status": True or False, # True if the Release is in a valid state. Otherwise at least one condition in `ReleaseCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Release. }, - "skaffoldSupportedCondition": { # SkaffoldSupportedCondition contains information about when support for the release's version of skaffold ends. # Details around the support state of the release's skaffold version. - "maintenanceModeTime": "A String", # The time at which this release's version of skaffold will enter maintenance mode. - "skaffoldSupportState": "A String", # The skaffold support state for this release's version of skaffold. - "status": True or False, # True if the version of skaffold used by this release is supported. - "supportExpirationTime": "A String", # The time at which this release's version of skaffold will no longer be supported. + "skaffoldSupportedCondition": { # SkaffoldSupportedCondition contains information about when support for the release's version of Skaffold ends. # Details around the support state of the release's Skaffold version. + "maintenanceModeTime": "A String", # The time at which this release's version of Skaffold will enter maintenance mode. + "skaffoldSupportState": "A String", # The Skaffold support state for this release's version of Skaffold. + "status": True or False, # True if the version of Skaffold used by this release is supported. + "supportExpirationTime": "A String", # The time at which this release's version of Skaffold will no longer be supported. }, }, "createTime": "A String", # Output only. Time at which the `Release` was created. + "customTargetTypeSnapshots": [ # Output only. 
Snapshot of the custom target types referenced by the targets taken at release creation time. + { # A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy. + "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + "a_key": "A String", + }, + "createTime": "A String", # Output only. Time at which the `CustomTargetType` was created. + "customActions": { # CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions. # Configures render and deploy for the `CustomTargetType` using Skaffold custom actions. + "deployAction": "A String", # Required. The Skaffold custom action responsible for deploy operations. + "includeSkaffoldModules": [ # Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose. + { # Skaffold Config modules and their remote source. + "configs": [ # Optional. The Skaffold Config modules to use from the specified source. + "A String", + ], + "git": { # Git repository containing Skaffold Config modules. # Remote git repository containing the Skaffold Config modules. + "path": "A String", # Optional. Relative path from the repository root to the Skaffold file. + "ref": "A String", # Optional. Git ref the package should be cloned from. + "repo": "A String", # Required. Git repository the package should be cloned from. + }, + "googleCloudStorage": { # Cloud Storage bucket containing Skaffold Config modules. # Cloud Storage bucket containing the Skaffold Config modules. + "path": "A String", # Optional. Relative path from the source to the Skaffold file. + "source": "A String", # Required. 
Cloud Storage source paths to copy recursively. For example, providing "gs://my-bucket/dir/configs/*" will result in Skaffold copying all files within the "dir/configs" directory in the bucket "my-bucket". + }, + }, + ], + "renderAction": "A String", # Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`. + }, + "customTargetTypeId": "A String", # Output only. Resource id of the `CustomTargetType`. + "description": "A String", # Optional. Description of the `CustomTargetType`. Max length is 255 characters. + "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. + "a_key": "A String", + }, + "name": "A String", # Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`. + "uid": "A String", # Output only. Unique identifier of the `CustomTargetType`. + "updateTime": "A String", # Output only. Most recent time at which the `CustomTargetType` was updated. + }, + ], "deliveryPipelineSnapshot": { # A `DeliveryPipeline` resource in the Cloud Deploy API. A `DeliveryPipeline` defines a pipeline through which a Skaffold configuration can progress. # Output only. 
Snapshot of the parent pipeline taken at release creation time. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. "a_key": "A String", @@ -734,7 +879,7 @@

Method Details

"status": True or False, # True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. "updateTime": "A String", # Last time the condition was updated. }, - "targetsPresentCondition": { # TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. + "targetsPresentCondition": { # `TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist. # Details around targets enumerated in the pipeline. "missingTargets": [ # The list of Target names that do not exist. For example, `projects/{project_id}/locations/{location_name}/targets/{target_name}`. "A String", ], @@ -812,6 +957,15 @@

Method Details

"runtimeConfig": { # RuntimeConfig contains the runtime specific configurations for a deployment strategy. # Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. "cloudRun": { # CloudRunConfig contains the Cloud Run runtime configuration. # Cloud Run runtime configuration. "automaticTrafficControl": True or False, # Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + "canaryRevisionTags": [ # Optional. A list of tags that are added to the canary revision while the canary deployment is in progress. + "A String", + ], + "priorRevisionTags": [ # Optional. A list of tags that are added to the prior revision while the canary deployment is in progress. + "A String", + ], + "stableRevisionTags": [ # Optional. A list of tags that are added to the final stable revision after the canary deployment is completed. + "A String", + ], }, "kubernetes": { # KubernetesConfig contains the Kubernetes runtime configuration. # Kubernetes runtime configuration. "gatewayServiceMesh": { # Information about the Kubernetes Gateway API service mesh configuration. # Kubernetes Gateway API service mesh configuration. @@ -887,6 +1041,11 @@

Method Details

"cloudRun": { # CloudRunRenderMetadata contains Cloud Run information associated with a `Release` render. # Output only. Metadata associated with rendering for Cloud Run. "service": "A String", # Output only. The name of the Cloud Run Service in the rendered manifest. Format is `projects/{project}/locations/{location}/services/{service}`. }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined render operation. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "renderingBuild": "A String", # Output only. The resource name of the Cloud Build `Build` object that is used to render the manifest for this target. Format is `projects/{project}/locations/{location}/builds/{build}`. "renderingState": "A String", # Output only. Current state of the render operation for this Target. @@ -901,6 +1060,9 @@

Method Details

"membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. + "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. + "customTargetType": "A String", # Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + }, "deployParameters": { # Optional. The deploy parameters to use for this target. "a_key": "A String", }, diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html index ca0d0ebc324..4d90d364ea1 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html @@ -235,6 +235,11 @@

Method Details

"A String", ], }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined `Rollout` operations. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "name": "A String", # Optional. Name of the `Rollout`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{deliveryPipeline}/releases/{release}/rollouts/a-z{0,62}`. "phases": [ # Output only. The phases that represent the workflows of this `Rollout`. @@ -489,6 +494,11 @@

Method Details

"A String", ], }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined `Rollout` operations. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "name": "A String", # Optional. Name of the `Rollout`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{deliveryPipeline}/releases/{release}/rollouts/a-z{0,62}`. "phases": [ # Output only. The phases that represent the workflows of this `Rollout`. @@ -744,6 +754,11 @@

Method Details

"A String", ], }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined `Rollout` operations. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, }, "name": "A String", # Optional. Name of the `Rollout`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{deliveryPipeline}/releases/{release}/rollouts/a-z{0,62}`. "phases": [ # Output only. The phases that represent the workflows of this `Rollout`. diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.jobRuns.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.jobRuns.html index 64cb0d9b3c2..f09b89bc9c6 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.jobRuns.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.jobRuns.html @@ -138,6 +138,14 @@

Method Details

"A String", ], }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined deploy operation. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, + "customTarget": { # CustomTargetDeployMetadata contains information from a Custom Target deploy operation. # Output only. Custom Target metadata associated with a `DeployJobRun`. + "skipMessage": "A String", # Output only. Skip message provided in the results of a custom deploy operation. + }, }, }, "endTime": "A String", # Output only. Time at which the `JobRun` ended. @@ -217,6 +225,14 @@

Method Details

"A String", ], }, + "custom": { # CustomMetadata contains information from a user defined operation. # Output only. Custom metadata provided by user defined deploy operation. + "values": { # Output only. Key-value pairs provided by the user defined operation. + "a_key": "A String", + }, + }, + "customTarget": { # CustomTargetDeployMetadata contains information from a Custom Target deploy operation. # Output only. Custom Target metadata associated with a `DeployJobRun`. + "skipMessage": "A String", # Output only. Skip message provided in the results of a custom deploy operation. + }, }, }, "endTime": "A String", # Output only. Time at which the `JobRun` ended. diff --git a/docs/dyn/clouddeploy_v1.projects.locations.html b/docs/dyn/clouddeploy_v1.projects.locations.html index 8d8dab1929d..5845e9ea782 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.html @@ -74,6 +74,11 @@

Cloud Deploy API . projects . locations

Instance Methods

+

+ customTargetTypes() +

+

Returns the customTargetTypes Resource.

+

deliveryPipelines()

@@ -156,13 +161,13 @@

Method Details

"name": "A String", # Name of the configuration. "supportedVersions": [ # All supported versions of Skaffold. { # Details of a supported Skaffold version. - "maintenanceModeTime": "A String", # The time at which this version of skaffold will enter maintenance mode. + "maintenanceModeTime": "A String", # The time at which this version of Skaffold will enter maintenance mode. "supportEndDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date when this version is expected to no longer be supported. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, - "supportExpirationTime": "A String", # The time at which this version of skaffold will no longer be supported. + "supportExpirationTime": "A String", # The time at which this version of Skaffold will no longer be supported. "version": "A String", # Release version number. For example, "1.20.3". 
}, ], diff --git a/docs/dyn/clouddeploy_v1.projects.locations.targets.html b/docs/dyn/clouddeploy_v1.projects.locations.targets.html index 1d259220115..a08ad068547 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.targets.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.targets.html @@ -127,6 +127,9 @@

Method Details

"membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. + "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. + "customTargetType": "A String", # Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + }, "deployParameters": { # Optional. The deploy parameters to use for this target. "a_key": "A String", }, @@ -267,6 +270,9 @@

Method Details

"membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. + "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. + "customTargetType": "A String", # Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + }, "deployParameters": { # Optional. The deploy parameters to use for this target. "a_key": "A String", }, @@ -392,6 +398,9 @@

Method Details

"membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. + "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. + "customTargetType": "A String", # Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + }, "deployParameters": { # Optional. The deploy parameters to use for this target. "a_key": "A String", }, @@ -476,6 +485,9 @@

Method Details

"membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. + "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. + "customTargetType": "A String", # Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. + }, "deployParameters": { # Optional. The deploy parameters to use for this target. "a_key": "A String", }, diff --git a/docs/dyn/cloudscheduler_v1.projects.locations.jobs.html b/docs/dyn/cloudscheduler_v1.projects.locations.jobs.html index 8ea2535d8d8..85b7f327bc3 100644 --- a/docs/dyn/cloudscheduler_v1.projects.locations.jobs.html +++ b/docs/dyn/cloudscheduler_v1.projects.locations.jobs.html @@ -128,7 +128,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -138,7 +138,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -201,7 +201,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -211,7 +211,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -299,7 +299,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -309,7 +309,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -383,7 +383,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -393,7 +393,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -475,7 +475,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -485,7 +485,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -549,7 +549,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -559,7 +559,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -635,7 +635,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -645,7 +645,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -721,7 +721,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -731,7 +731,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -807,7 +807,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -817,7 +817,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. 
diff --git a/docs/dyn/cloudscheduler_v1beta1.projects.locations.jobs.html b/docs/dyn/cloudscheduler_v1beta1.projects.locations.jobs.html index 677c3058b0e..23966be2743 100644 --- a/docs/dyn/cloudscheduler_v1beta1.projects.locations.jobs.html +++ b/docs/dyn/cloudscheduler_v1beta1.projects.locations.jobs.html @@ -128,7 +128,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -138,7 +138,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -202,7 +202,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -212,7 +212,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -302,7 +302,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -312,7 +312,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -389,7 +389,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -399,7 +399,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -482,7 +482,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -492,7 +492,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -557,7 +557,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -567,7 +567,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -644,7 +644,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -654,7 +654,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -731,7 +731,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -741,7 +741,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. @@ -819,7 +819,7 @@

Method Details

"version": "A String", # App version. By default, the job is sent to the version which is the default version when the job is attempted. }, "body": "A String", # Body. HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. + "headers": { # HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. 
Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `"AppEngine-Google; (+http://code.google.com/appengine)"`. This header can be modified, but Cloud Scheduler will append `"AppEngine-Google; (+http://code.google.com/appengine)"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler. "a_key": "A String", }, "httpMethod": "A String", # The HTTP method to use for the request. PATCH and OPTIONS are not permitted. @@ -829,7 +829,7 @@

Method Details

"description": "A String", # Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters. "httpTarget": { # Http target. The job will be pushed to the job handler by means of an HTTP request via an http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by means of an HTTP response code in the range [200 - 299]. A failure to receive a response constitutes a failed execution. For a redirected request, the response returned by the redirected request is considered. # HTTP target. "body": "A String", # HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. - "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC "Zulu" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. 
You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. + "headers": { # HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `"Google-Cloud-Scheduler"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `"application/octet-stream"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `"application/json"`. The total size of headers must be less than 80KB. "a_key": "A String", }, "httpMethod": "A String", # Which HTTP method to use for the request. 
diff --git a/docs/dyn/cloudtasks_v2.projects.locations.queues.tasks.html b/docs/dyn/cloudtasks_v2.projects.locations.queues.tasks.html index 6ae8a5ad033..448d13f7b9b 100644 --- a/docs/dyn/cloudtasks_v2.projects.locations.queues.tasks.html +++ b/docs/dyn/cloudtasks_v2.projects.locations.queues.tasks.html @@ -216,7 +216,7 @@

Method Details

{ # Request message for CreateTask. "responseView": "A String", # The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource. - "task": { # A unit of scheduled work. # Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. If the task's queue was created using Cloud Tasks, then another task with the same name can't be created for ~1hour after the original task was deleted or executed. If the task's queue was created using queue.yaml or queue.xml, then another task with the same name can't be created for ~9days after the original task was deleted or executed. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. 
The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently. + "task": { # A unit of scheduled work. # Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. The IDs of deleted tasks are not immediately available for reuse. It can take up to 4 hours (or 9 days if the task's queue was created using a queue.yaml or queue.xml) for the task ID to be released and made available again. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently. "appEngineHttpRequest": { # App Engine HTTP request. The message defines the HTTP request that is sent to an App Engine app when the task is dispatched. 
Using AppEngineHttpRequest requires [`appengine.applications.get`](https://cloud.google.com/appengine/docs/admin-api/access-control) Google IAM permission for the project and the following scope: `https://www.googleapis.com/auth/cloud-platform` The task will be delivered to the App Engine app which belongs to the same project as the queue. For more information, see [How Requests are Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed) and how routing is affected by [dispatch files](https://cloud.google.com/appengine/docs/python/config/dispatchref). Traffic is encrypted during transport and never leaves Google datacenters. Because this traffic is carried over a communication mechanism internal to Google, you cannot explicitly set the protocol (for example, HTTP or HTTPS). The request to the handler, however, will appear to have used the HTTP protocol. The AppEngineRouting used to construct the URL that the task is delivered to can be set at the queue-level or task-level: * If app_engine_routing_override is set on the queue, this value is used for all tasks in the queue, no matter what the setting is for the task-level app_engine_routing. The `url` that the task will be sent to is: * `url =` host `+` relative_uri Tasks can be dispatched to secure app handlers, unsecure app handlers, and URIs restricted with [`login: admin`](https://cloud.google.com/appengine/docs/standard/python/config/appref). Because tasks are not run as any user, they cannot be dispatched to URIs restricted with [`login: required`](https://cloud.google.com/appengine/docs/standard/python/config/appref) Task dispatches also do not follow redirects. The task attempt has succeeded if the app's request handler returns an HTTP response code in the range [`200` - `299`]. The task attempt has failed if the app's handler returns a non-2xx response code or Cloud Tasks does not receive response before the deadline. 
Failed tasks will be retried according to the retry configuration. `503` (Service Unavailable) is considered an App Engine system error instead of an application error and will cause Cloud Tasks' traffic congestion control to temporarily throttle the queue's dispatches. Unlike other types of task targets, a `429` (Too Many Requests) response from an app handler does not cause traffic congestion control to throttle the queue. # HTTP request that is sent to the App Engine app handler. An App Engine task is a task that has AppEngineHttpRequest set. "appEngineRouting": { # App Engine Routing. Defines routing characteristics specific to App Engine - service, version, and instance. For more information about services, versions, and instances see [An Overview of App Engine](https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine), [Microservices Architecture on Google App Engine](https://cloud.google.com/appengine/docs/python/microservices-on-app-engine), [App Engine Standard request routing](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed), and [App Engine Flex request routing](https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed). Using AppEngineRouting requires [`appengine.applications.get`](https://cloud.google.com/appengine/docs/admin-api/access-control) Google IAM permission for the project and the following scope: `https://www.googleapis.com/auth/cloud-platform` # Task-level setting for App Engine routing. * If app_engine_routing_override is set on the queue, this value is used for all tasks in the queue, no matter what the setting is for the task-level app_engine_routing. "host": "A String", # Output only. The host that the task is sent to. The host is constructed from the domain name of the app associated with the queue's project ID (for example .appspot.com), and the service, version, and instance. Tasks which were created using the App Engine SDK might have a custom domain name. 
For more information, see [How Requests are Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed). diff --git a/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html b/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html index 26ccc9ddb32..24513166484 100644 --- a/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html +++ b/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html @@ -354,7 +354,7 @@

Method Details

{ # Request message for CreateTask. "responseView": "A String", # The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource. - "task": { # A unit of scheduled work. # Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or completed recently then the call will fail with ALREADY_EXISTS. If the task's queue was created using Cloud Tasks, then another task with the same name can't be created for ~1 hour after the original task was deleted or completed. If the task's queue was created using queue.yaml or queue.xml, then another task with the same name can't be created for ~9 days after the original task was deleted or completed. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. 
The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently. + "task": { # A unit of scheduled work. # Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or completed recently then the call will fail with ALREADY_EXISTS. The IDs of deleted tasks are not immediately available for reuse. It can take up to 4 hours (or 9 days if the task's queue was created using a queue.yaml or queue.xml) for the task ID to be released and made available again. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently. "appEngineHttpRequest": { # App Engine HTTP request. The message defines the HTTP request that is sent to an App Engine app when the task is dispatched. This proto can only be used for tasks in a queue which has app_engine_http_target set. 
Using AppEngineHttpRequest requires [`appengine.applications.get`](https://cloud.google.com/appengine/docs/admin-api/access-control) Google IAM permission for the project and the following scope: `https://www.googleapis.com/auth/cloud-platform` The task will be delivered to the App Engine app which belongs to the same project as the queue. For more information, see [How Requests are Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed) and how routing is affected by [dispatch files](https://cloud.google.com/appengine/docs/python/config/dispatchref). Traffic is encrypted during transport and never leaves Google datacenters. Because this traffic is carried over a communication mechanism internal to Google, you cannot explicitly set the protocol (for example, HTTP or HTTPS). The request to the handler, however, will appear to have used the HTTP protocol. The AppEngineRouting used to construct the URL that the task is delivered to can be set at the queue-level or task-level: * If set, app_engine_routing_override is used for all tasks in the queue, no matter what the setting is for the task-level app_engine_routing. The `url` that the task will be sent to is: * `url =` host `+` relative_url Tasks can be dispatched to secure app handlers, unsecure app handlers, and URIs restricted with [`login: admin`](https://cloud.google.com/appengine/docs/standard/python/config/appref). Because tasks are not run as any user, they cannot be dispatched to URIs restricted with [`login: required`](https://cloud.google.com/appengine/docs/standard/python/config/appref) Task dispatches also do not follow redirects. The task attempt has succeeded if the app's request handler returns an HTTP response code in the range [`200` - `299`]. The task attempt has failed if the app's handler returns a non-2xx response code or Cloud Tasks does not receive response before the deadline. Failed tasks will be retried according to the retry configuration. 
`503` (Service Unavailable) is considered an App Engine system error instead of an application error and will cause Cloud Tasks' traffic congestion control to temporarily throttle the queue's dispatches. Unlike other types of task targets, a `429` (Too Many Requests) response from an app handler does not cause traffic congestion control to throttle the queue. # App Engine HTTP request that is sent to the task's target. Can be set only if app_engine_http_target is set on the queue. An App Engine task is a task that has AppEngineHttpRequest set. "appEngineRouting": { # App Engine Routing. Defines routing characteristics specific to App Engine - service, version, and instance. For more information about services, versions, and instances see [An Overview of App Engine](https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine), [Microservices Architecture on Google App Engine](https://cloud.google.com/appengine/docs/python/microservices-on-app-engine), [App Engine Standard request routing](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed), and [App Engine Flex request routing](https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed). # Task-level setting for App Engine routing. If set, app_engine_routing_override is used for all tasks in the queue, no matter what the setting is for the task-level app_engine_routing. "host": "A String", # Output only. The host that the task is sent to. For more information, see [How Requests are Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed). The host is constructed as: * `host = [application_domain_name]` `| [service] + '.' + [application_domain_name]` `| [version] + '.' + [application_domain_name]` `| [version_dot_service]+ '.' + [application_domain_name]` `| [instance] + '.' + [application_domain_name]` `| [instance_dot_service] + '.' + [application_domain_name]` `| [instance_dot_version] + '.' 
+ [application_domain_name]` `| [instance_dot_version_dot_service] + '.' + [application_domain_name]` * `application_domain_name` = The domain name of the app, for example .appspot.com, which is associated with the queue's project ID. Some tasks which were created using the App Engine SDK use a custom domain name. * `service =` service * `version =` version * `version_dot_service =` version `+ '.' +` service * `instance =` instance * `instance_dot_service =` instance `+ '.' +` service * `instance_dot_version =` instance `+ '.' +` version * `instance_dot_version_dot_service =` instance `+ '.' +` version `+ '.' +` service If service is empty, then the task will be sent to the service which is the default service when the task is attempted. If version is empty, then the task will be sent to the version which is the default version when the task is attempted. If instance is empty, then the task will be sent to an instance which is available when the task is attempted. If service, version, or instance is invalid, then the task will be sent to the default version of the default service when the task is attempted. diff --git a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html index f465c41b0bd..c9f8de275ce 100644 --- a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html +++ b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html @@ -220,7 +220,7 @@

Method Details

{ # Request message for CreateTask. "responseView": "A String", # The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource. - "task": { # A unit of scheduled work. # Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. If the task's queue was created using Cloud Tasks, then another task with the same name can't be created for ~1 hour after the original task was deleted or executed. If the task's queue was created using queue.yaml or queue.xml, then another task with the same name can't be created for ~9 days after the original task was deleted or executed. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. 
The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently. + "task": { # A unit of scheduled work. # Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. The IDs of deleted tasks are not immediately available for reuse. It can take up to 4 hours (or 9 days if the task's queue was created using a queue.yaml or queue.xml) for the task ID to be released and made available again. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently. "appEngineHttpRequest": { # App Engine HTTP request. The message defines the HTTP request that is sent to an App Engine app when the task is dispatched. 
Using AppEngineHttpRequest requires [`appengine.applications.get`](https://cloud.google.com/appengine/docs/admin-api/access-control) Google IAM permission for the project and the following scope: `https://www.googleapis.com/auth/cloud-platform` The task will be delivered to the App Engine app which belongs to the same project as the queue. For more information, see [How Requests are Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed) and how routing is affected by [dispatch files](https://cloud.google.com/appengine/docs/python/config/dispatchref). Traffic is encrypted during transport and never leaves Google datacenters. Because this traffic is carried over a communication mechanism internal to Google, you cannot explicitly set the protocol (for example, HTTP or HTTPS). The request to the handler, however, will appear to have used the HTTP protocol. The AppEngineRouting used to construct the URL that the task is delivered to can be set at the queue-level or task-level: * If set, app_engine_routing_override is used for all tasks in the queue, no matter what the setting is for the task-level app_engine_routing. The `url` that the task will be sent to is: * `url =` host `+` relative_uri Tasks can be dispatched to secure app handlers, unsecure app handlers, and URIs restricted with [`login: admin`](https://cloud.google.com/appengine/docs/standard/python/config/appref). Because tasks are not run as any user, they cannot be dispatched to URIs restricted with [`login: required`](https://cloud.google.com/appengine/docs/standard/python/config/appref) Task dispatches also do not follow redirects. The task attempt has succeeded if the app's request handler returns an HTTP response code in the range [`200` - `299`]. The task attempt has failed if the app's handler returns a non-2xx response code or Cloud Tasks does not receive response before the deadline. Failed tasks will be retried according to the retry configuration. 
`503` (Service Unavailable) is considered an App Engine system error instead of an application error and will cause Cloud Tasks' traffic congestion control to temporarily throttle the queue's dispatches. Unlike other types of task targets, a `429` (Too Many Requests) response from an app handler does not cause traffic congestion control to throttle the queue. # HTTP request that is sent to the App Engine app handler. An App Engine task is a task that has AppEngineHttpRequest set. "appEngineRouting": { # App Engine Routing. Defines routing characteristics specific to App Engine - service, version, and instance. For more information about services, versions, and instances see [An Overview of App Engine](https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine), [Microservices Architecture on Google App Engine](https://cloud.google.com/appengine/docs/python/microservices-on-app-engine), [App Engine Standard request routing](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed), and [App Engine Flex request routing](https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed). # Task-level setting for App Engine routing. If set, app_engine_routing_override is used for all tasks in the queue, no matter what the setting is for the task-level app_engine_routing. "host": "A String", # Output only. The host that the task is sent to. The host is constructed from the domain name of the app associated with the queue's project ID (for example .appspot.com), and the service, version, and instance. Tasks which were created using the App Engine SDK might have a custom domain name. For more information, see [How Requests are Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed). 
diff --git a/docs/dyn/connectors_v1.projects.locations.global_.customConnectors.customConnectorVersions.html b/docs/dyn/connectors_v1.projects.locations.global_.customConnectors.customConnectorVersions.html index 55fbb3ce13b..2d1c7f6d413 100644 --- a/docs/dyn/connectors_v1.projects.locations.global_.customConnectors.customConnectorVersions.html +++ b/docs/dyn/connectors_v1.projects.locations.global_.customConnectors.customConnectorVersions.html @@ -111,7 +111,7 @@

Method Details

The object takes the form of: { # CustomConnectorVersion indicates a specific version of a connector. - "authConfig": { # AuthConfig defines details of a authentication type. # Required. Configuration for establishing the authentication to the connector destination. + "authConfig": { # AuthConfig defines details of an authentication type. # Optional. Authentication config for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. "additionalVariables": [ # List containing additional auth configs. { # ConfigVariable represents a configuration variable present in a Connection. or AuthConfig. "boolValue": True or False, # Value is a bool. @@ -176,23 +176,78 @@

Method Details

"username": "A String", # Username. }, }, - "createTime": "A String", # Output only. Created time. - "destinationConfig": { # Define the Connectors target endpoint. # Required. Configuration of the customConnector's destination. - "destinations": [ # The destinations for the key. - { - "host": "A String", # For publicly routable host. - "port": 42, # The port is the target port number that is accepted by the destination. - "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + "backendVariableTemplates": [ # Optional. Backend variables config templates. This translates to additional variable templates in connection. + { # ConfigVariableTemplate provides metadata about a `ConfigVariable` that is used in a Connection. + "authorizationCodeLink": { # This configuration captures the details required to render an authorization link for the OAuth Authorization Code Flow. # Authorization code link options. To be populated if `ValueType` is `AUTHORIZATION_CODE` + "clientId": "A String", # The client ID assigned to the Google Cloud Connectors OAuth app for the connector data source. + "enablePkce": True or False, # Whether to enable PKCE for the auth code flow. + "scopes": [ # The scopes for which the user will authorize Google Cloud Connectors on the connector data source. + "A String", + ], + "uri": "A String", # The base URI the user must click to trigger the authorization code login flow. + }, + "description": "A String", # Description. + "displayName": "A String", # Display name of the parameter. + "enumOptions": [ # Enum options. To be populated if `ValueType` is `ENUM` + { # EnumOption definition + "displayName": "A String", # Display name of the option. + "id": "A String", # Id of the option. + }, + ], + "isAdvanced": True or False, # Indicates if current template is part of advanced settings + "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. 
Location Tyep denotes where this value should be sent in BYOC connections. + "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. + "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. + "fieldComparisons": [ # A list of fields to be compared. + { # Field that needs to be compared. + "boolValue": True or False, # Boolean value + "comparator": "A String", # Comparator to use for comparing the field value. + "intValue": "A String", # Integer value + "key": "A String", # Key of the field. + "stringValue": "A String", # String value + }, + ], + "logicalExpressions": [ # A list of nested conditions to be compared. + # Object with schema name: LogicalExpression + ], + "logicalOperator": "A String", # The logical operator to use between the fields and conditions. + }, + "roleGrant": { # This configuration defines all the Cloud IAM roles that needs to be granted to a particular Google Cloud resource for the selected principal like service account. These configurations will let UI display to customers what IAM roles need to be granted by them. Or these configurations can be used by the UI to render a 'grant' button to do the same on behalf of the user. # Role grant configuration for the config variable. + "helperTextTemplate": "A String", # Template that UI can use to provide helper text to customers. + "principal": "A String", # Prinicipal/Identity for whom the role need to assigned. + "resource": { # Resource definition # Resource on which the roles needs to be granted for the principal. + "pathTemplate": "A String", # Template to uniquely represent a Google Cloud resource in a format IAM expects This is a template that can have references to other values provided in the config variable template. + "type": "A String", # Different types of resource supported. 
+ }, + "roles": [ # List of roles that need to be granted. + "A String", + ], }, - ], - "key": "A String", # The key is the destination identifier that is supported by the Connector. - }, - "enableBackendDestinationConfig": True or False, # Optional. Whether to enable backend destination config. This is the backend server that the connector connects to. + "state": "A String", # State of the config variable. + "validationRegex": "A String", # Regular expression in RE2 syntax used for validating the `value` of a `ConfigVariable`. + "valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. + }, + ], + "createTime": "A String", # Output only. Created time. + "destinationConfigs": [ # Optional. Destination config(s) for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. + { # Define the Connectors target endpoint. + "destinations": [ # The destinations for the key. + { + "host": "A String", # For publicly routable host. + "port": 42, # The port is the target port number that is accepted by the destination. + "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + }, + ], + "key": "A String", # The key is the destination identifier that is supported by the Connector. + }, + ], + "enableBackendDestinationConfig": True or False, # Optional. When enabled, the connector will be a facade/ proxy, and connects to the destination provided during connection creation. "labels": { # Optional. Resource labels to represent user-provided metadata. Refer to cloud documentation on labels for more details. https://cloud.google.com/compute/docs/labeling-resources "a_key": "A String", }, "name": "A String", # Output only. Identifier. Resource name of the Version. 
Format: projects/{project}/locations/{location}/customConnectors/{custom_connector}/customConnectorVersions/{custom_connector_version} - "serviceAccount": "A String", # Required. Service account needed for runtime plane to access Custom Connector secrets. + "serviceAccount": "A String", # Required. Service account used by runtime plane to access auth config secrets. "specLocation": "A String", # Optional. Location of the custom connector spec. "updateTime": "A String", # Output only. Updated time. } @@ -277,7 +332,7 @@

Method Details

An object of the form: { # CustomConnectorVersion indicates a specific version of a connector. - "authConfig": { # AuthConfig defines details of a authentication type. # Required. Configuration for establishing the authentication to the connector destination. + "authConfig": { # AuthConfig defines details of a authentication type. # Optional. Authentication config for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. "additionalVariables": [ # List containing additional auth configs. { # ConfigVariable represents a configuration variable present in a Connection. or AuthConfig. "boolValue": True or False, # Value is a bool. @@ -342,23 +397,78 @@

Method Details

"username": "A String", # Username. }, }, - "createTime": "A String", # Output only. Created time. - "destinationConfig": { # Define the Connectors target endpoint. # Required. Configuration of the customConnector's destination. - "destinations": [ # The destinations for the key. - { - "host": "A String", # For publicly routable host. - "port": 42, # The port is the target port number that is accepted by the destination. - "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + "backendVariableTemplates": [ # Optional. Backend variables config templates. This translates to additional variable templates in connection. + { # ConfigVariableTemplate provides metadata about a `ConfigVariable` that is used in a Connection. + "authorizationCodeLink": { # This configuration captures the details required to render an authorization link for the OAuth Authorization Code Flow. # Authorization code link options. To be populated if `ValueType` is `AUTHORIZATION_CODE` + "clientId": "A String", # The client ID assigned to the Google Cloud Connectors OAuth app for the connector data source. + "enablePkce": True or False, # Whether to enable PKCE for the auth code flow. + "scopes": [ # The scopes for which the user will authorize Google Cloud Connectors on the connector data source. + "A String", + ], + "uri": "A String", # The base URI the user must click to trigger the authorization code login flow. + }, + "description": "A String", # Description. + "displayName": "A String", # Display name of the parameter. + "enumOptions": [ # Enum options. To be populated if `ValueType` is `ENUM` + { # EnumOption definition + "displayName": "A String", # Display name of the option. + "id": "A String", # Id of the option. + }, + ], + "isAdvanced": True or False, # Indicates if current template is part of advanced settings + "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. 
Location Tyep denotes where this value should be sent in BYOC connections. + "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. + "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. + "fieldComparisons": [ # A list of fields to be compared. + { # Field that needs to be compared. + "boolValue": True or False, # Boolean value + "comparator": "A String", # Comparator to use for comparing the field value. + "intValue": "A String", # Integer value + "key": "A String", # Key of the field. + "stringValue": "A String", # String value + }, + ], + "logicalExpressions": [ # A list of nested conditions to be compared. + # Object with schema name: LogicalExpression + ], + "logicalOperator": "A String", # The logical operator to use between the fields and conditions. + }, + "roleGrant": { # This configuration defines all the Cloud IAM roles that needs to be granted to a particular Google Cloud resource for the selected principal like service account. These configurations will let UI display to customers what IAM roles need to be granted by them. Or these configurations can be used by the UI to render a 'grant' button to do the same on behalf of the user. # Role grant configuration for the config variable. + "helperTextTemplate": "A String", # Template that UI can use to provide helper text to customers. + "principal": "A String", # Prinicipal/Identity for whom the role need to assigned. + "resource": { # Resource definition # Resource on which the roles needs to be granted for the principal. + "pathTemplate": "A String", # Template to uniquely represent a Google Cloud resource in a format IAM expects This is a template that can have references to other values provided in the config variable template. + "type": "A String", # Different types of resource supported. 
+ }, + "roles": [ # List of roles that need to be granted. + "A String", + ], }, - ], - "key": "A String", # The key is the destination identifier that is supported by the Connector. - }, - "enableBackendDestinationConfig": True or False, # Optional. Whether to enable backend destination config. This is the backend server that the connector connects to. + "state": "A String", # State of the config variable. + "validationRegex": "A String", # Regular expression in RE2 syntax used for validating the `value` of a `ConfigVariable`. + "valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. + }, + ], + "createTime": "A String", # Output only. Created time. + "destinationConfigs": [ # Optional. Destination config(s) for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. + { # Define the Connectors target endpoint. + "destinations": [ # The destinations for the key. + { + "host": "A String", # For publicly routable host. + "port": 42, # The port is the target port number that is accepted by the destination. + "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + }, + ], + "key": "A String", # The key is the destination identifier that is supported by the Connector. + }, + ], + "enableBackendDestinationConfig": True or False, # Optional. When enabled, the connector will be a facade/ proxy, and connects to the destination provided during connection creation. "labels": { # Optional. Resource labels to represent user-provided metadata. Refer to cloud documentation on labels for more details. https://cloud.google.com/compute/docs/labeling-resources "a_key": "A String", }, "name": "A String", # Output only. Identifier. Resource name of the Version. 
Format: projects/{project}/locations/{location}/customConnectors/{custom_connector}/customConnectorVersions/{custom_connector_version} - "serviceAccount": "A String", # Required. Service account needed for runtime plane to access Custom Connector secrets. + "serviceAccount": "A String", # Required. Service account used by runtime plane to access auth config secrets. "specLocation": "A String", # Optional. Location of the custom connector spec. "updateTime": "A String", # Output only. Updated time. }
@@ -383,7 +493,7 @@

Method Details

{ # Response message for Connectors.ListCustomConnectorVersions. "customConnectorVersions": [ # A list of connector versions. { # CustomConnectorVersion indicates a specific version of a connector. - "authConfig": { # AuthConfig defines details of a authentication type. # Required. Configuration for establishing the authentication to the connector destination. + "authConfig": { # AuthConfig defines details of a authentication type. # Optional. Authentication config for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. "additionalVariables": [ # List containing additional auth configs. { # ConfigVariable represents a configuration variable present in a Connection. or AuthConfig. "boolValue": True or False, # Value is a bool. @@ -448,23 +558,78 @@

Method Details

"username": "A String", # Username. }, }, - "createTime": "A String", # Output only. Created time. - "destinationConfig": { # Define the Connectors target endpoint. # Required. Configuration of the customConnector's destination. - "destinations": [ # The destinations for the key. - { - "host": "A String", # For publicly routable host. - "port": 42, # The port is the target port number that is accepted by the destination. - "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + "backendVariableTemplates": [ # Optional. Backend variables config templates. This translates to additional variable templates in connection. + { # ConfigVariableTemplate provides metadata about a `ConfigVariable` that is used in a Connection. + "authorizationCodeLink": { # This configuration captures the details required to render an authorization link for the OAuth Authorization Code Flow. # Authorization code link options. To be populated if `ValueType` is `AUTHORIZATION_CODE` + "clientId": "A String", # The client ID assigned to the Google Cloud Connectors OAuth app for the connector data source. + "enablePkce": True or False, # Whether to enable PKCE for the auth code flow. + "scopes": [ # The scopes for which the user will authorize Google Cloud Connectors on the connector data source. + "A String", + ], + "uri": "A String", # The base URI the user must click to trigger the authorization code login flow. }, - ], - "key": "A String", # The key is the destination identifier that is supported by the Connector. - }, - "enableBackendDestinationConfig": True or False, # Optional. Whether to enable backend destination config. This is the backend server that the connector connects to. + "description": "A String", # Description. + "displayName": "A String", # Display name of the parameter. + "enumOptions": [ # Enum options. 
To be populated if `ValueType` is `ENUM` + { # EnumOption definition + "displayName": "A String", # Display name of the option. + "id": "A String", # Id of the option. + }, + ], + "isAdvanced": True or False, # Indicates if current template is part of advanced settings + "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. + "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. + "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. + "fieldComparisons": [ # A list of fields to be compared. + { # Field that needs to be compared. + "boolValue": True or False, # Boolean value + "comparator": "A String", # Comparator to use for comparing the field value. + "intValue": "A String", # Integer value + "key": "A String", # Key of the field. + "stringValue": "A String", # String value + }, + ], + "logicalExpressions": [ # A list of nested conditions to be compared. + # Object with schema name: LogicalExpression + ], + "logicalOperator": "A String", # The logical operator to use between the fields and conditions. + }, + "roleGrant": { # This configuration defines all the Cloud IAM roles that needs to be granted to a particular Google Cloud resource for the selected principal like service account. These configurations will let UI display to customers what IAM roles need to be granted by them. Or these configurations can be used by the UI to render a 'grant' button to do the same on behalf of the user. # Role grant configuration for the config variable. + "helperTextTemplate": "A String", # Template that UI can use to provide helper text to customers. + "principal": "A String", # Prinicipal/Identity for whom the role need to assigned. 
+ "resource": { # Resource definition # Resource on which the roles needs to be granted for the principal. + "pathTemplate": "A String", # Template to uniquely represent a Google Cloud resource in a format IAM expects This is a template that can have references to other values provided in the config variable template. + "type": "A String", # Different types of resource supported. + }, + "roles": [ # List of roles that need to be granted. + "A String", + ], + }, + "state": "A String", # State of the config variable. + "validationRegex": "A String", # Regular expression in RE2 syntax used for validating the `value` of a `ConfigVariable`. + "valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. + }, + ], + "createTime": "A String", # Output only. Created time. + "destinationConfigs": [ # Optional. Destination config(s) for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. + { # Define the Connectors target endpoint. + "destinations": [ # The destinations for the key. + { + "host": "A String", # For publicly routable host. + "port": 42, # The port is the target port number that is accepted by the destination. + "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + }, + ], + "key": "A String", # The key is the destination identifier that is supported by the Connector. + }, + ], + "enableBackendDestinationConfig": True or False, # Optional. When enabled, the connector will be a facade/ proxy, and connects to the destination provided during connection creation. "labels": { # Optional. Resource labels to represent user-provided metadata. Refer to cloud documentation on labels for more details. https://cloud.google.com/compute/docs/labeling-resources "a_key": "A String", }, "name": "A String", # Output only. Identifier. Resource name of the Version. 
Format: projects/{project}/locations/{location}/customConnectors/{custom_connector}/customConnectorVersions/{custom_connector_version} - "serviceAccount": "A String", # Required. Service account needed for runtime plane to access Custom Connector secrets. + "serviceAccount": "A String", # Required. Service account used by runtime plane to access auth config secrets. "specLocation": "A String", # Optional. Location of the custom connector spec. "updateTime": "A String", # Output only. Updated time. }, @@ -500,7 +665,7 @@

Method Details

The object takes the form of: { # CustomConnectorVersion indicates a specific version of a connector. - "authConfig": { # AuthConfig defines details of a authentication type. # Required. Configuration for establishing the authentication to the connector destination. + "authConfig": { # AuthConfig defines details of a authentication type. # Optional. Authentication config for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. "additionalVariables": [ # List containing additional auth configs. { # ConfigVariable represents a configuration variable present in a Connection. or AuthConfig. "boolValue": True or False, # Value is a bool. @@ -565,23 +730,78 @@

Method Details

"username": "A String", # Username. }, }, - "createTime": "A String", # Output only. Created time. - "destinationConfig": { # Define the Connectors target endpoint. # Required. Configuration of the customConnector's destination. - "destinations": [ # The destinations for the key. - { - "host": "A String", # For publicly routable host. - "port": 42, # The port is the target port number that is accepted by the destination. - "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + "backendVariableTemplates": [ # Optional. Backend variables config templates. This translates to additional variable templates in connection. + { # ConfigVariableTemplate provides metadata about a `ConfigVariable` that is used in a Connection. + "authorizationCodeLink": { # This configuration captures the details required to render an authorization link for the OAuth Authorization Code Flow. # Authorization code link options. To be populated if `ValueType` is `AUTHORIZATION_CODE` + "clientId": "A String", # The client ID assigned to the Google Cloud Connectors OAuth app for the connector data source. + "enablePkce": True or False, # Whether to enable PKCE for the auth code flow. + "scopes": [ # The scopes for which the user will authorize Google Cloud Connectors on the connector data source. + "A String", + ], + "uri": "A String", # The base URI the user must click to trigger the authorization code login flow. + }, + "description": "A String", # Description. + "displayName": "A String", # Display name of the parameter. + "enumOptions": [ # Enum options. To be populated if `ValueType` is `ENUM` + { # EnumOption definition + "displayName": "A String", # Display name of the option. + "id": "A String", # Id of the option. + }, + ], + "isAdvanced": True or False, # Indicates if current template is part of advanced settings + "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. 
Location Tyep denotes where this value should be sent in BYOC connections. + "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. + "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. + "fieldComparisons": [ # A list of fields to be compared. + { # Field that needs to be compared. + "boolValue": True or False, # Boolean value + "comparator": "A String", # Comparator to use for comparing the field value. + "intValue": "A String", # Integer value + "key": "A String", # Key of the field. + "stringValue": "A String", # String value + }, + ], + "logicalExpressions": [ # A list of nested conditions to be compared. + # Object with schema name: LogicalExpression + ], + "logicalOperator": "A String", # The logical operator to use between the fields and conditions. + }, + "roleGrant": { # This configuration defines all the Cloud IAM roles that needs to be granted to a particular Google Cloud resource for the selected principal like service account. These configurations will let UI display to customers what IAM roles need to be granted by them. Or these configurations can be used by the UI to render a 'grant' button to do the same on behalf of the user. # Role grant configuration for the config variable. + "helperTextTemplate": "A String", # Template that UI can use to provide helper text to customers. + "principal": "A String", # Prinicipal/Identity for whom the role need to assigned. + "resource": { # Resource definition # Resource on which the roles needs to be granted for the principal. + "pathTemplate": "A String", # Template to uniquely represent a Google Cloud resource in a format IAM expects This is a template that can have references to other values provided in the config variable template. + "type": "A String", # Different types of resource supported. 
+ }, + "roles": [ # List of roles that need to be granted. + "A String", + ], }, - ], - "key": "A String", # The key is the destination identifier that is supported by the Connector. - }, - "enableBackendDestinationConfig": True or False, # Optional. Whether to enable backend destination config. This is the backend server that the connector connects to. + "state": "A String", # State of the config variable. + "validationRegex": "A String", # Regular expression in RE2 syntax used for validating the `value` of a `ConfigVariable`. + "valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. + }, + ], + "createTime": "A String", # Output only. Created time. + "destinationConfigs": [ # Optional. Destination config(s) for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true. + { # Define the Connectors target endpoint. + "destinations": [ # The destinations for the key. + { + "host": "A String", # For publicly routable host. + "port": 42, # The port is the target port number that is accepted by the destination. + "serviceAttachment": "A String", # PSC service attachments. Format: projects/*/regions/*/serviceAttachments/* + }, + ], + "key": "A String", # The key is the destination identifier that is supported by the Connector. + }, + ], + "enableBackendDestinationConfig": True or False, # Optional. When enabled, the connector will be a facade/ proxy, and connects to the destination provided during connection creation. "labels": { # Optional. Resource labels to represent user-provided metadata. Refer to cloud documentation on labels for more details. https://cloud.google.com/compute/docs/labeling-resources "a_key": "A String", }, "name": "A String", # Output only. Identifier. Resource name of the Version. 
Format: projects/{project}/locations/{location}/customConnectors/{custom_connector}/customConnectorVersions/{custom_connector_version} - "serviceAccount": "A String", # Required. Service account needed for runtime plane to access Custom Connector secrets. + "serviceAccount": "A String", # Required. Service account used by runtime plane to access auth config secrets. "specLocation": "A String", # Optional. Location of the custom connector spec. "updateTime": "A String", # Output only. Updated time. } diff --git a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html index a9069eefd50..2dbfe8ff0e4 100644 --- a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html +++ b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html @@ -141,6 +141,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -197,6 +198,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -298,6 +300,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -354,6 +357,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -411,6 +415,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -469,6 +474,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -568,6 +574,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -614,6 +621,9 @@

Method Details

"entityApis": True or False, # Specifies if the connector supports entity apis like 'createEntity'. "sqlQuery": True or False, # Specifies if the connector supports 'ExecuteSqlQuery' operation. }, + "unsupportedConnectionTypes": [ # Output only. Unsupported connection types. + "A String", + ], "updateTime": "A String", # Output only. Updated time. }
@@ -666,6 +676,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -722,6 +733,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -823,6 +835,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -879,6 +892,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -936,6 +950,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -994,6 +1009,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -1093,6 +1109,7 @@

Method Details

], "isAdvanced": True or False, # Indicates if current template is part of advanced settings "key": "A String", # Key of the config variable. + "locationType": "A String", # Optional. Location Tyep denotes where this value should be sent in BYOC connections. "required": True or False, # Flag represents that this `ConfigVariable` must be provided for a connection. "requiredCondition": { # Struct for representing boolean expressions. # Condition under which a field would be required. The condition can be represented in the form of a logical expression. "fieldComparisons": [ # A list of fields to be compared. @@ -1139,6 +1156,9 @@

Method Details

"entityApis": True or False, # Specifies if the connector supports entity apis like 'createEntity'. "sqlQuery": True or False, # Specifies if the connector supports 'ExecuteSqlQuery' operation. }, + "unsupportedConnectionTypes": [ # Output only. Unsupported connection types. + "A String", + ], "updateTime": "A String", # Output only. Updated time. }, ], diff --git a/docs/dyn/connectors_v2.projects.locations.connections.html b/docs/dyn/connectors_v2.projects.locations.connections.html index 05f532260f4..108a7d7e08d 100644 --- a/docs/dyn/connectors_v2.projects.locations.connections.html +++ b/docs/dyn/connectors_v2.projects.locations.connections.html @@ -84,18 +84,98 @@

Instance Methods

Returns the entityTypes Resource.

+

+ checkReadiness(name, x__xgafv=None)

+

Reports readiness status of the connector. Similar logic to GetStatus but modified for kubernetes health check to understand.

+

+ checkStatus(name, x__xgafv=None)

+

Reports the status of the connection. Note that when the connection is in a state that is not ACTIVE, the implementation of this RPC method must return a Status with the corresponding State instead of returning a gRPC status code that is not "OK", which indicates that ConnectionStatus itself, not the connection, failed.

close()

Close httplib2 connections.

+

+ exchangeAuthCode(name, body=None, x__xgafv=None)

+

ExchangeAuthCode exchanges the OAuth authorization code (and other necessary data) for an access token (and associated credentials).

executeSqlQuery(connection, body=None, x__xgafv=None)

Executes a SQL statement specified in the body of the request. An example of this SQL statement in the case of Salesforce connector would be 'select * from Account a, Order o where a.Id = o.AccountId'.

+

+ refreshAccessToken(name, body=None, x__xgafv=None)

+

RefreshAccessToken exchanges the OAuth refresh token (and other necessary data) for a new access token (and new associated credentials).

Method Details

+
+ checkReadiness(name, x__xgafv=None) +
Reports readiness status of the connector. Similar logic to GetStatus but modified for kubernetes health check to understand.
+
+Args:
+  name: string, A parameter (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response containing status of the connector for readiness prober.
+  "status": "A String",
+}
+
+ +
+ checkStatus(name, x__xgafv=None) +
Reports the status of the connection. Note that when the connection is in a state that is not ACTIVE, the implementation of this RPC method must return a Status with the corresponding State instead of returning a gRPC status code that is not "OK", which indicates that ConnectionStatus itself, not the connection, failed.
+
+Args:
+  name: string, A parameter (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The status of the connector.
+  "description": "A String", # When the connector is not in ACTIVE state, the description must be populated to specify the reason why it's not in ACTIVE state.
+  "state": "A String", # State of the connector.
+}
+
+
close()
Close httplib2 connections.
+
+ exchangeAuthCode(name, body=None, x__xgafv=None) +
ExchangeAuthCode exchanges the OAuth authorization code (and other necessary data) for an access token (and associated credentials).
+
+Args:
+  name: string, A parameter (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # ExchangeAuthCodeRequest currently includes no fields.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # ExchangeAuthCodeResponse includes the returned access token and its associated credentials.
+  "accessCredentials": { # AccessCredentials includes the OAuth access token, and the other fields returned along with it.
+    "accessToken": "A String", # OAuth access token.
+    "expiresIn": "A String", # Duration till the access token expires.
+    "refreshToken": "A String", # OAuth refresh token.
+  },
+}
+
+
executeSqlQuery(connection, body=None, x__xgafv=None)
Executes a SQL statement specified in the body of the request. An example of this SQL statement in the case of Salesforce connector would be 'select * from Account a, Order o where a.Id = o.AccountId'.
@@ -136,4 +216,33 @@ 

Method Details

}
+
+ refreshAccessToken(name, body=None, x__xgafv=None) +
RefreshAccessToken exchanges the OAuth refresh token (and other necessary data) for a new access token (and new associated credentials).
+
+Args:
+  name: string, A parameter (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # RefreshAccessTokenRequest currently includes no fields.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # RefreshAccessTokenResponse includes the returned access token and its associated credentials.
+  "accessCredentials": { # AccessCredentials includes the OAuth access token, and the other fields returned along with it.
+    "accessToken": "A String", # OAuth access token.
+    "expiresIn": "A String", # Duration till the access token expires.
+    "refreshToken": "A String", # OAuth refresh token.
+  },
+}
+
+ \ No newline at end of file diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html index 3061816c5ab..a81b427d82a 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.html +++ b/docs/dyn/container_v1.projects.locations.clusters.html @@ -527,6 +527,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -1433,6 +1434,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -2242,6 +2244,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -3798,6 +3801,7 @@

Method Details

"desiredMonitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # The desired monitoring configuration. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html index 6581ab078e9..e1e08ae4207 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.html +++ b/docs/dyn/container_v1.projects.zones.clusters.html @@ -603,6 +603,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -1509,6 +1510,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -2362,6 +2364,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -3825,6 +3828,7 @@

Method Details

"desiredMonitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # The desired monitoring configuration. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.html b/docs/dyn/container_v1beta1.projects.locations.clusters.html index 58ec59ff3ec..db6e1e88e66 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.html @@ -549,6 +549,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -1540,6 +1541,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -2434,6 +2436,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -4086,6 +4089,7 @@

Method Details

"desiredMonitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # The desired monitoring configuration. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.html b/docs/dyn/container_v1beta1.projects.zones.clusters.html index 5ced3c62567..00e053b9d89 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.html @@ -632,6 +632,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -1623,6 +1624,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -2561,6 +2563,7 @@

Method Details

"monitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # Monitoring configuration for the cluster. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration @@ -4113,6 +4116,7 @@

Method Details

"desiredMonitoringConfig": { # MonitoringConfig is cluster monitoring configuration. # The desired monitoring configuration. "advancedDatapathObservabilityConfig": { # AdvancedDatapathObservabilityConfig specifies configuration of observability features of advanced datapath. # Configuration of Advanced Datapath Observability features. "enableMetrics": True or False, # Expose flow metrics on nodes + "enableRelay": True or False, # Enable Relay component "relayMode": "A String", # Method used to make Relay available }, "componentConfig": { # MonitoringComponentConfig is cluster monitoring component configuration. # Monitoring components configuration diff --git a/docs/dyn/datacatalog_v1.projects.locations.taxonomies.html b/docs/dyn/datacatalog_v1.projects.locations.taxonomies.html index 5c67454cf55..4a9bcfaeefa 100644 --- a/docs/dyn/datacatalog_v1.projects.locations.taxonomies.html +++ b/docs/dyn/datacatalog_v1.projects.locations.taxonomies.html @@ -139,7 +139,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -166,7 +166,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -257,7 +257,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -366,7 +366,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -408,7 +408,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -443,7 +443,7 @@

Method Details

Updates a taxonomy, including its display name, description, and activated policy types.
 
 Args:
-  name: string, Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. (required)
+  name: string, Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -453,7 +453,7 @@ 

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -481,7 +481,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -538,7 +538,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns, and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this taxonomy. The name can't start or end with spaces, must contain only Unicode letters, numbers, underscores, dashes, and spaces, and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. + "name": "A String", # Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs. "policyTagCount": 42, # Output only. Number of policy tags in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. diff --git a/docs/dyn/datacatalog_v1.projects.locations.taxonomies.policyTags.html b/docs/dyn/datacatalog_v1.projects.locations.taxonomies.policyTags.html index 11f460ddf85..877bb043259 100644 --- a/docs/dyn/datacatalog_v1.projects.locations.taxonomies.policyTags.html +++ b/docs/dyn/datacatalog_v1.projects.locations.taxonomies.policyTags.html @@ -125,7 +125,7 @@

Method Details

], "description": "A String", # Description of this policy tag. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this policy tag. The name can't start or end with spaces and must be unique within the parent taxonomy, contain only Unicode letters, numbers, underscores, dashes and spaces, and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. + "name": "A String", # Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag. If empty, this is a top level tag. If not set, defaults to an empty string. For example, for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag, and, for "Geolocation", this field is empty. } @@ -143,7 +143,7 @@

Method Details

], "description": "A String", # Description of this policy tag. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this policy tag. The name can't start or end with spaces and must be unique within the parent taxonomy, contain only Unicode letters, numbers, underscores, dashes and spaces, and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. + "name": "A String", # Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag. If empty, this is a top level tag. If not set, defaults to an empty string. For example, for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag, and, for "Geolocation", this field is empty. }
@@ -186,7 +186,7 @@

Method Details

], "description": "A String", # Description of this policy tag. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this policy tag. The name can't start or end with spaces and must be unique within the parent taxonomy, contain only Unicode letters, numbers, underscores, dashes and spaces, and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. + "name": "A String", # Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag. If empty, this is a top level tag. If not set, defaults to an empty string. For example, for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag, and, for "Geolocation", this field is empty. }
@@ -259,7 +259,7 @@

Method Details

], "description": "A String", # Description of this policy tag. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this policy tag. The name can't start or end with spaces and must be unique within the parent taxonomy, contain only Unicode letters, numbers, underscores, dashes and spaces, and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. + "name": "A String", # Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag. If empty, this is a top level tag. If not set, defaults to an empty string. For example, for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag, and, for "Geolocation", this field is empty. }, ], @@ -285,7 +285,7 @@

Method Details

Updates a policy tag, including its display name, description, and parent policy tag.
 
 Args:
-  name: string, Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. (required)
+  name: string, Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -295,7 +295,7 @@ 

Method Details

], "description": "A String", # Description of this policy tag. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this policy tag. The name can't start or end with spaces and must be unique within the parent taxonomy, contain only Unicode letters, numbers, underscores, dashes and spaces, and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. + "name": "A String", # Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag. If empty, this is a top level tag. If not set, defaults to an empty string. For example, for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag, and, for "Geolocation", this field is empty. } @@ -314,7 +314,7 @@

Method Details

], "description": "A String", # Description of this policy tag. If not set, defaults to empty. The description must contain only Unicode characters, tabs, newlines, carriage returns and page breaks, and be at most 2000 bytes long when encoded in UTF-8. "displayName": "A String", # Required. User-defined name of this policy tag. The name can't start or end with spaces and must be unique within the parent taxonomy, contain only Unicode letters, numbers, underscores, dashes and spaces, and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. + "name": "A String", # Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs. "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag. If empty, this is a top level tag. If not set, defaults to an empty string. For example, for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag, and, for "Geolocation", this field is empty. }
diff --git a/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.html b/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.html index 67c231ef18f..66b4ea0592b 100644 --- a/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.html +++ b/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.html @@ -136,7 +136,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". + "name": "A String", # Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". "policyTagCount": 42, # Output only. Number of policy tags contained in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -163,7 +163,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". + "name": "A String", # Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". "policyTagCount": 42, # Output only. Number of policy tags contained in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -254,7 +254,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". + "name": "A String", # Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". "policyTagCount": 42, # Output only. Number of policy tags contained in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -360,7 +360,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". + "name": "A String", # Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". "policyTagCount": 42, # Output only. Number of policy tags contained in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -402,7 +402,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". + "name": "A String", # Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". "policyTagCount": 42, # Output only. Number of policy tags contained in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -437,7 +437,7 @@

Method Details

Updates a taxonomy.
 
 Args:
-  name: string, Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". (required)
+  name: string, Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -447,7 +447,7 @@ 

Method Details

], "description": "A String", # Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". + "name": "A String", # Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". "policyTagCount": 42, # Output only. Number of policy tags contained in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. @@ -475,7 +475,7 @@

Method Details

], "description": "A String", # Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. The taxonomy display name must be unique within an organization. - "name": "A String", # Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". + "name": "A String", # Identifier. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". "policyTagCount": 42, # Output only. Number of policy tags contained in this taxonomy. "service": { # The source system of the Taxonomy. # Output only. Identity of the service which owns the Taxonomy. This field is only populated when the taxonomy is created by a Google Cloud service. Currently only 'DATAPLEX' is supported. "identity": "A String", # The service agent for the service. diff --git a/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.policyTags.html b/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.policyTags.html index 225f2be6c08..7c9da643366 100644 --- a/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.policyTags.html +++ b/docs/dyn/datacatalog_v1beta1.projects.locations.taxonomies.policyTags.html @@ -125,7 +125,7 @@

Method Details

], "description": "A String", # Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". + "name": "A String", # Identifier. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. } @@ -143,7 +143,7 @@

Method Details

], "description": "A String", # Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". + "name": "A String", # Identifier. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. }
@@ -186,7 +186,7 @@

Method Details

], "description": "A String", # Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". + "name": "A String", # Identifier. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. } @@ -259,7 +259,7 @@

Method Details

], "description": "A String", # Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". + "name": "A String", # Identifier. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. }, ], @@ -285,7 +285,7 @@

Method Details

Updates a policy tag.
 
 Args:
-  name: string, Output only. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". (required)
+  name: string, Identifier. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -295,7 +295,7 @@ 

Method Details

], "description": "A String", # Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". + "name": "A String", # Identifier. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. } @@ -314,7 +314,7 @@

Method Details

], "description": "A String", # Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. If not set, defaults to an empty description. "displayName": "A String", # Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. - "name": "A String", # Output only. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". + "name": "A String", # Identifier. Resource name of this policy tag, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}". "parentPolicyTag": "A String", # Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. }
diff --git a/docs/dyn/dataflow_v1b3.projects.html b/docs/dyn/dataflow_v1b3.projects.html index 36e70b8c860..d73191e2e95 100644 --- a/docs/dyn/dataflow_v1b3.projects.html +++ b/docs/dyn/dataflow_v1b3.projects.html @@ -154,6 +154,14 @@

Method Details

"labels": { # Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015…" "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here. "a_key": "A String", }, + "streamingScalingReport": { # Contains per-user worker telemetry used in streaming autoscaling. # Contains per-user worker telemetry used in streaming autoscaling. + "activeBundleCount": 42, # Current acive bundle count. + "activeThreadCount": 42, # Current acive thread count. + "maximumBundleCount": 42, # Maximum bundle count limit. + "maximumBytesCount": 42, # Maximum bytes count limit. + "maximumThreadCount": 42, # Maximum thread count limit. + "outstandingBytesCount": 42, # Current outstanding bytes count. + }, "time": "A String", # The timestamp of the worker_message. "workerHealthReport": { # WorkerHealthReport contains information about the health of a worker. The VM should be identified by the labels attached to the WorkerMessage that this health ping belongs to. # The health of a worker. "msg": "A String", # Message describing any unusual health reports. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.html b/docs/dyn/dataflow_v1b3.projects.locations.html index 2f644edd884..a537da67c3e 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.html @@ -132,6 +132,14 @@

Method Details

"labels": { # Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015…" "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here. "a_key": "A String", }, + "streamingScalingReport": { # Contains per-user worker telemetry used in streaming autoscaling. # Contains per-user worker telemetry used in streaming autoscaling. + "activeBundleCount": 42, # Current acive bundle count. + "activeThreadCount": 42, # Current acive thread count. + "maximumBundleCount": 42, # Maximum bundle count limit. + "maximumBytesCount": 42, # Maximum bytes count limit. + "maximumThreadCount": 42, # Maximum thread count limit. + "outstandingBytesCount": 42, # Current outstanding bytes count. + }, "time": "A String", # The timestamp of the worker_message. "workerHealthReport": { # WorkerHealthReport contains information about the health of a worker. The VM should be identified by the labels attached to the WorkerMessage that this health ping belongs to. # The health of a worker. "msg": "A String", # Message describing any unusual health reports. diff --git a/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html b/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html index f85808f4c4e..9c1cf9e63ab 100644 --- a/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html +++ b/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html @@ -706,7 +706,7 @@

Method Details

Args: workspace: string, Required. The workspace's name. (required) - filter: string, Optional. Optional filter for the returned list in go/filtering format. Filtering is only currently supported on the `path` field. + filter: string, Optional. Optional filter for the returned list in filtering format. Filtering is only currently supported on the `path` field. See https://google.aip.dev/160 for details. pageSize: integer, Optional. Maximum number of search results to return. The server may return fewer items than requested. If unspecified, the server will pick an appropriate default. pageToken: string, Optional. Page token received from a previous `SearchFilesRequest` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchFilesRequest` must match the call that provided the page token. x__xgafv: string, V1 error format. diff --git a/docs/dyn/dns_v1.changes.html b/docs/dyn/dns_v1.changes.html index 18499525cff..2f81080a4ce 100644 --- a/docs/dyn/dns_v1.changes.html +++ b/docs/dyn/dns_v1.changes.html @@ -115,8 +115,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -141,14 +144,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -174,8 +181,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -193,8 +203,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -239,8 +252,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -265,14 +281,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -298,8 +318,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -317,8 +340,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -380,8 +406,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -406,14 +435,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -439,8 +472,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -458,8 +494,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -504,8 +543,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -530,14 +572,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -563,8 +609,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -582,8 +631,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -654,8 +706,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -680,14 +735,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -713,8 +772,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -732,8 +794,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -778,8 +843,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -804,14 +872,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -837,8 +909,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -856,8 +931,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -934,8 +1012,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -960,14 +1041,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -993,8 +1078,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1012,8 +1100,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1058,8 +1149,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1084,14 +1178,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1117,8 +1215,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1136,8 +1237,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. 
diff --git a/docs/dyn/dns_v1.resourceRecordSets.html b/docs/dyn/dns_v1.resourceRecordSets.html index abfca958b8b..8383e2f4783 100644 --- a/docs/dyn/dns_v1.resourceRecordSets.html +++ b/docs/dyn/dns_v1.resourceRecordSets.html @@ -119,8 +119,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -145,14 +148,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -178,8 +185,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -197,8 +207,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -251,8 +264,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -277,14 +293,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -310,8 +330,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -329,8 +352,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -415,8 +441,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -441,14 +470,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -474,8 +507,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -493,8 +529,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -565,8 +604,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -591,14 +633,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -624,8 +670,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -643,8 +692,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -717,8 +769,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -743,14 +798,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -776,8 +835,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -795,8 +857,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -849,8 +914,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -875,14 +943,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -908,8 +980,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -927,8 +1002,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. 
diff --git a/docs/dyn/dns_v1.responsePolicyRules.html b/docs/dyn/dns_v1.responsePolicyRules.html index 6f1d9ce1863..3fd7210438e 100644 --- a/docs/dyn/dns_v1.responsePolicyRules.html +++ b/docs/dyn/dns_v1.responsePolicyRules.html @@ -128,8 +128,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -154,14 +157,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -187,8 +194,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -206,8 +216,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -270,8 +283,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -296,14 +312,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -329,8 +349,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -348,8 +371,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -437,8 +463,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -463,14 +492,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -496,8 +529,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -515,8 +551,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -594,8 +633,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -620,14 +662,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -653,8 +699,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -672,8 +721,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -755,8 +807,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -781,14 +836,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -814,8 +873,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -833,8 +895,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -901,8 +966,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -927,14 +995,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -960,8 +1032,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -979,8 +1054,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1047,8 +1125,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1073,14 +1154,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1106,8 +1191,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1125,8 +1213,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1193,8 +1284,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1219,14 +1313,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1252,8 +1350,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1271,8 +1372,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. 
diff --git a/docs/dyn/dns_v1beta2.changes.html b/docs/dyn/dns_v1beta2.changes.html index df81a4104fd..7d8bdfe19fc 100644 --- a/docs/dyn/dns_v1beta2.changes.html +++ b/docs/dyn/dns_v1beta2.changes.html @@ -115,8 +115,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -145,8 +148,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -171,14 +177,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -204,8 +214,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -223,8 +236,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -252,8 +268,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -298,8 +317,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -328,8 +350,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -354,14 +379,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -387,8 +416,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -406,8 +438,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -435,8 +470,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -498,8 +536,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -528,8 +569,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -554,14 +598,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -587,8 +635,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -606,8 +657,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -635,8 +689,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -681,8 +738,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -711,8 +771,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -737,14 +800,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -770,8 +837,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -789,8 +859,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -818,8 +891,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -890,8 +966,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -920,8 +999,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -946,14 +1028,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -979,8 +1065,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -998,8 +1087,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1027,8 +1119,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1073,8 +1168,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1103,8 +1201,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1129,14 +1230,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1162,8 +1267,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1181,8 +1289,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1210,8 +1321,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1288,8 +1402,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1318,8 +1435,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1344,14 +1464,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1377,8 +1501,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1396,8 +1523,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1425,8 +1555,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1471,8 +1604,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1501,8 +1637,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1527,14 +1666,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1560,8 +1703,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1579,8 +1725,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1608,8 +1757,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. 
diff --git a/docs/dyn/dns_v1beta2.resourceRecordSets.html b/docs/dyn/dns_v1beta2.resourceRecordSets.html index df5442bcf59..22d5ee1e41d 100644 --- a/docs/dyn/dns_v1beta2.resourceRecordSets.html +++ b/docs/dyn/dns_v1beta2.resourceRecordSets.html @@ -119,8 +119,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -149,8 +152,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -175,14 +181,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -208,8 +218,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -227,8 +240,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -256,8 +272,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -310,8 +329,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -340,8 +362,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -366,14 +391,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -399,8 +428,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -418,8 +450,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -447,8 +482,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -528,8 +566,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -558,8 +599,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -584,14 +628,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -617,8 +665,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -636,8 +687,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -665,8 +719,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -737,8 +794,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -767,8 +827,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -793,14 +856,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -826,8 +893,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -845,8 +915,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -874,8 +947,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -948,8 +1024,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -978,8 +1057,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1004,14 +1086,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1037,8 +1123,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1056,8 +1145,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1085,8 +1177,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1139,8 +1234,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1169,8 +1267,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1195,14 +1296,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1228,8 +1333,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1247,8 +1355,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1276,8 +1387,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. 
diff --git a/docs/dyn/dns_v1beta2.responsePolicyRules.html b/docs/dyn/dns_v1beta2.responsePolicyRules.html index 4012e1f91b8..bbbd2542396 100644 --- a/docs/dyn/dns_v1beta2.responsePolicyRules.html +++ b/docs/dyn/dns_v1beta2.responsePolicyRules.html @@ -128,8 +128,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -158,8 +161,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -184,14 +190,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -217,8 +227,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -236,8 +249,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -265,8 +281,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -329,8 +348,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -359,8 +381,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -385,14 +410,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -418,8 +447,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -437,8 +469,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -466,8 +501,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -555,8 +593,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -585,8 +626,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -611,14 +655,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -644,8 +692,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -663,8 +714,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -692,8 +746,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -771,8 +828,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -801,8 +861,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -827,14 +890,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -860,8 +927,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -879,8 +949,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -908,8 +981,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -991,8 +1067,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1021,8 +1100,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1047,14 +1129,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1080,8 +1166,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1099,8 +1188,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1128,8 +1220,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1196,8 +1291,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1226,8 +1324,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1252,14 +1353,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1285,8 +1390,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1304,8 +1412,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1333,8 +1444,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1401,8 +1515,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1431,8 +1548,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1457,14 +1577,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1490,8 +1614,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1509,8 +1636,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1538,8 +1668,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1606,8 +1739,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1636,8 +1772,11 @@

Method Details

"enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. 
"ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1662,14 +1801,18 @@

Method Details

], "kind": "dns#rRSetRoutingPolicyGeoPolicy", }, + "healthCheck": "A String", # The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks "kind": "dns#rRSetRoutingPolicy", "primaryBackup": { # Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy. "backupGeoTargets": { # Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user. # Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy. "enableFencing": True or False, # Without fencing, if health check fails for all configured items in the current geo bucket, we failover to the next nearest geo bucket. With fencing, if health checking is enabled, as long as some targets in the current geo bucket are healthy, we return only the healthy targets. However, if all targets are unhealthy, we don't failover to the next nearest bucket; instead, we return all the items in the current bucket even when all targets are unhealthy. "items": [ # The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead. { # ResourceRecordSet data for one geo location. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1695,8 +1838,11 @@

Method Details

"kind": "dns#rRSetRoutingPolicyGeoPolicy", }, "kind": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", - "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. - "internalLoadBalancers": [ + "primaryTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. Unhealthy endpoints are omitted from the results. If all endpoints are unhealthy, we serve a response based on the backup_geo_targets. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1714,8 +1860,11 @@

Method Details

"wrr": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. @@ -1743,8 +1892,11 @@

Method Details

"wrrPolicy": { # Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion. "items": [ { # A routing block which contains the routing information for one WRR item. - "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. - "internalLoadBalancers": [ + "healthCheckedTargets": { # HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set. # Endpoints that are health checked before making the routing decision. The unhealthy endpoints are omitted from the result. If all endpoints within a bucket are unhealthy, we choose a different bucket (sampled with respect to its weight) for responding. If DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set. + "externalEndpoints": [ # The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + "A String", + ], + "internalLoadBalancers": [ # Configuration for internal load balancers to be health checked. { # The configuration for an individual load balancer to health check. "ipAddress": "A String", # The frontend IP address of the load balancer to health check. "ipProtocol": "A String", # The protocol of the load balancer to health check. 
diff --git a/docs/dyn/drive_v3.changes.html b/docs/dyn/drive_v3.changes.html index 57a76373857..21a6c11ca42 100644 --- a/docs/dyn/drive_v3.changes.html +++ b/docs/dyn/drive_v3.changes.html @@ -186,7 +186,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. diff --git a/docs/dyn/drive_v3.drives.html b/docs/dyn/drive_v3.drives.html index 6936314f845..769cad5458d 100644 --- a/docs/dyn/drive_v3.drives.html +++ b/docs/dyn/drive_v3.drives.html @@ -153,7 +153,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. @@ -208,7 +208,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. @@ -286,7 +286,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. @@ -348,7 +348,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. @@ -415,7 +415,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. @@ -495,7 +495,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. @@ -552,7 +552,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. @@ -608,7 +608,7 @@

Method Details

"kind": "drive#drive", # Output only. Identifies what kind of resource this is. Value: the fixed string `"drive#drive"`. "name": "A String", # The name of this shared drive. "orgUnitId": "A String", # Output only. The organizational unit of this shared drive. This field is only populated on `drives.list` responses when the `useDomainAdminAccess` parameter is set to `true`. - "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. + "restrictions": { # A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions. "adminManagedRestrictions": True or False, # Whether administrative privileges on this shared drive are required to modify restrictions. "copyRequiresWriterPermission": True or False, # Whether the options to copy, print, or download files inside this shared drive, should be disabled for readers and commenters. When this restriction is set to `true`, it will override the similarly named field to `true` for any file inside this shared drive. "domainUsersOnly": True or False, # Whether access to this shared drive and items inside this shared drive is restricted to users of the domain to which this shared drive belongs. This restriction may be overridden by other sharing policies controlled outside of this shared drive. diff --git a/docs/dyn/file_v1.projects.locations.instances.html b/docs/dyn/file_v1.projects.locations.instances.html index a45025ba959..37a700333f9 100644 --- a/docs/dyn/file_v1.projects.locations.instances.html +++ b/docs/dyn/file_v1.projects.locations.instances.html @@ -128,7 +128,7 @@

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -258,7 +258,7 @@

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -329,7 +329,7 @@

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -409,7 +409,7 @@

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -530,12 +530,12 @@

Method Details

Revert an existing instance's file system to a specified snapshot.
 
 Args:
-  name: string, Required. projects/{project_id}/locations/{location_id}/instances/{instance_id}. The resource name of the instance, in the format (required)
+  name: string, Required. `projects/{project_id}/locations/{location_id}/instances/{instance_id}`. The resource name of the instance, in the format (required)
   body: object, The request body.
     The object takes the form of:
 
 { # RevertInstanceRequest reverts the given instance's file share to the specified snapshot.
-  "targetSnapshotId": "A String", # Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}
+  "targetSnapshotId": "A String", # Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like `projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}`
 }
 
   x__xgafv: string, V1 error format.
diff --git a/docs/dyn/file_v1beta1.projects.locations.instances.html b/docs/dyn/file_v1beta1.projects.locations.instances.html
index 3158f0a5e46..507acbfc4e4 100644
--- a/docs/dyn/file_v1beta1.projects.locations.instances.html
+++ b/docs/dyn/file_v1beta1.projects.locations.instances.html
@@ -141,7 +141,7 @@ 

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -286,7 +286,7 @@

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -372,7 +372,7 @@

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -467,7 +467,7 @@

Method Details

"fileShares": [ # File system shares on the instance. For this version, only a single file share is supported. { # File share configuration for the instance. "capacityGb": "A String", # File share capacity in gigabytes (GB). Filestore defines 1 GB as 1024^3 bytes. - "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable. + "name": "A String", # Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable. "nfsExportOptions": [ # Nfs Export Options. There is a limit of 10 export options per file share. { # NFS export options specifications. "accessMode": "A String", # Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. @@ -596,12 +596,12 @@

Method Details

Revert an existing instance's file system to a specified snapshot.
 
 Args:
-  name: string, Required. projects/{project_id}/locations/{location_id}/instances/{instance_id}. The resource name of the instance, in the format (required)
+  name: string, Required. `projects/{project_id}/locations/{location_id}/instances/{instance_id}`. The resource name of the instance, in the format (required)
   body: object, The request body.
     The object takes the form of:
 
 { # RevertInstanceRequest reverts the given instance's file share to the specified snapshot.
-  "targetSnapshotId": "A String", # Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}
+  "targetSnapshotId": "A String", # Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like `projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}`
 }
 
   x__xgafv: string, V1 error format.
diff --git a/docs/dyn/firebaseappdistribution_v1alpha.apps.html b/docs/dyn/firebaseappdistribution_v1alpha.apps.html
new file mode 100644
index 00000000000..1af20e20f6a
--- /dev/null
+++ b/docs/dyn/firebaseappdistribution_v1alpha.apps.html
@@ -0,0 +1,186 @@
+
+
+
+

Firebase App Distribution API . apps

+

Instance Methods

+

+ release_by_hash() +

+

Returns the release_by_hash Resource.

+ +

+ releases() +

+

Returns the releases Resource.

+ +

+ testers() +

+

Returns the testers Resource.

+ +

+ upload_status() +

+

Returns the upload_status Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ get(mobilesdkAppId, appView=None, x__xgafv=None)

+

Get the app, if it exists

+

+ getJwt(mobilesdkAppId, x__xgafv=None)

+

Get a JWT token

+

+ provisionApp(mobilesdkAppId, x__xgafv=None)

+

Provision app distribution for an existing Firebase app, enabling it to subsequently be used by appdistro.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(mobilesdkAppId, appView=None, x__xgafv=None) +
Get the app, if it exists
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  appView: string, App view. When unset or set to BASIC, returns an App with everything set except for aab_state. When set to FULL, returns an App with aab_state set.
+    Allowed values
+      APP_VIEW_UNSPECIFIED - The default / unset value. The API will default to the BASIC view.
+      BASIC - Include everything except aab_state.
+      FULL - Include everything.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "aabCertificate": { # App bundle test certificate # App bundle test certificate generated for the app.
+    "certificateHashMd5": "A String", # MD5 hash of the certificate used to resign the AAB
+    "certificateHashSha1": "A String", # SHA1 hash of the certificate used to resign the AAB
+    "certificateHashSha256": "A String", # SHA256 hash of the certificate used to resign the AAB
+  },
+  "aabState": "A String", # App bundle state. Only valid for android apps. The app_view field in the request must be set to FULL in order for this to be populated.
+  "appId": "A String", # Firebase gmp app id
+  "bundleId": "A String", # Bundle identifier
+  "contactEmail": "A String", # Developer contact email for testers to reach out to about privacy or support issues.
+  "platform": "A String", # iOS or Android
+  "projectNumber": "A String", # Project number of the Firebase project, for example 300830567303.
+}
+
+ +
+ getJwt(mobilesdkAppId, x__xgafv=None) +
Get a JWT token
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "token": "A String",
+}
+
+ +
+ provisionApp(mobilesdkAppId, x__xgafv=None) +
Provision app distribution for an existing Firebase app, enabling it to subsequently be used by appdistro.
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.apps.release_by_hash.html b/docs/dyn/firebaseappdistribution_v1alpha.apps.release_by_hash.html new file mode 100644 index 00000000000..30154476b81 --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.apps.release_by_hash.html @@ -0,0 +1,121 @@ + + + +

Firebase App Distribution API . apps . release_by_hash

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(mobilesdkAppId, uploadHash, x__xgafv=None)

+

GET Release by binary upload hash

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(mobilesdkAppId, uploadHash, x__xgafv=None) +
GET Release by binary upload hash
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  uploadHash: string, The hash for the upload (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response object to get the release given an upload hash
+  "release": { # Proto defining a release object # Release object
+    "buildVersion": "A String", # Release build version
+    "displayVersion": "A String", # Release version
+    "distributedAt": "A String", # Timestamp when the release was created
+    "id": "A String", # Release Id
+    "instanceId": "A String", # Instance id of the release
+    "lastActivityAt": "A String", # Last activity timestamp
+    "openInvitationCount": 42, # Number of testers who have open invitations for the release
+    "receivedAt": "A String", # unused.
+    "releaseNotesSummary": "A String", # Release notes summary
+    "testerCount": 42, # Count of testers added to the release
+    "testerWithInstallCount": 42, # Number of testers who have installed the release
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.apps.releases.html b/docs/dyn/firebaseappdistribution_v1alpha.apps.releases.html new file mode 100644 index 00000000000..c79dd9adfaa --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.apps.releases.html @@ -0,0 +1,127 @@ + + + +

Firebase App Distribution API . apps . releases

+

Instance Methods

+

+ notes() +

+

Returns the notes Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ enable_access(mobilesdkAppId, releaseId, body=None, x__xgafv=None)

+

Enable access on a release for testers.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ enable_access(mobilesdkAppId, releaseId, body=None, x__xgafv=None) +
Enable access on a release for testers.
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  releaseId: string, Release identifier (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "buildVersion": "A String", # Optional. Ignored. Used to be build version of the app release if an instance identifier was provided for the release_id.
+  "displayVersion": "A String", # Optional. Ignored. Used to be display version of the app release if an instance identifier was provided for the release_id.
+  "emails": [ # Optional. An email address which should get access to this release, for example rebeccahe@google.com
+    "A String",
+  ],
+  "groupIds": [ # Optional. A repeated list of group aliases to enable access to a release for. Note: This field is misnamed, but can't be changed because we need to maintain compatibility with old build tools
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.apps.releases.notes.html b/docs/dyn/firebaseappdistribution_v1alpha.apps.releases.notes.html new file mode 100644 index 00000000000..bb6140dfee1 --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.apps.releases.notes.html @@ -0,0 +1,117 @@ + + + +

Firebase App Distribution API . apps . releases . notes

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(mobilesdkAppId, releaseId, body=None, x__xgafv=None)

+

Create release notes on a release.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(mobilesdkAppId, releaseId, body=None, x__xgafv=None) +
Create release notes on a release.
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  releaseId: string, Release identifier (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "releaseNotes": { # The actual release notes body from the user
+    "releaseNotes": "A String",
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.apps.testers.html b/docs/dyn/firebaseappdistribution_v1alpha.apps.testers.html new file mode 100644 index 00000000000..b25e48883b0 --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.apps.testers.html @@ -0,0 +1,114 @@ + + + +

Firebase App Distribution API . apps . testers

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ getTesterUdids(mobilesdkAppId, x__xgafv=None)

+

Get UDIDs of tester iOS devices in a project

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ getTesterUdids(mobilesdkAppId, x__xgafv=None) +
Get UDIDs of tester iOS devices in a project
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response containing the UDIDs of tester iOS devices in a project
+  "testerUdids": [ # The UDIDs of tester iOS devices in a project
+    { # The UDID of a tester's iOS device
+      "name": "A String", # The name of the tester's device
+      "platform": "A String", # The platform of the tester's device
+      "udid": "A String", # The UDID of the tester's device
+    },
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.apps.upload_status.html b/docs/dyn/firebaseappdistribution_v1alpha.apps.upload_status.html new file mode 100644 index 00000000000..b01efd7bdc8 --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.apps.upload_status.html @@ -0,0 +1,124 @@ + + + +

Firebase App Distribution API . apps . upload_status

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(mobilesdkAppId, uploadToken, x__xgafv=None)

+

GET Binary upload status by token

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(mobilesdkAppId, uploadToken, x__xgafv=None) +
GET Binary upload status by token
+
+Args:
+  mobilesdkAppId: string, Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289 (required)
+  uploadToken: string, The token for the upload (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "errorCode": "A String", # The error code associated with the upload (only set on "FAILURE")
+  "message": "A String", # Any additional context for the given upload status (e.g. error message) Meant to be displayed to the client
+  "release": { # Proto defining a release object # The release that was created from the upload (only set on "SUCCESS")
+    "buildVersion": "A String", # Release build version
+    "displayVersion": "A String", # Release version
+    "distributedAt": "A String", # Timestamp when the release was created
+    "id": "A String", # Release Id
+    "instanceId": "A String", # Instance id of the release
+    "lastActivityAt": "A String", # Last activity timestamp
+    "openInvitationCount": 42, # Number of testers who have open invitations for the release
+    "receivedAt": "A String", # unused.
+    "releaseNotesSummary": "A String", # Release notes summary
+    "testerCount": 42, # Count of testers added to the release
+    "testerWithInstallCount": 42, # Number of testers who have installed the release
+  },
+  "status": "A String", # The status of the upload
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.html b/docs/dyn/firebaseappdistribution_v1alpha.html new file mode 100644 index 00000000000..189bd581945 --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.html @@ -0,0 +1,116 @@ + + + +

Firebase App Distribution API

+

Instance Methods

+

+ apps() +

+

Returns the apps Resource.

+ +

+ projects() +

+

Returns the projects Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ new_batch_http_request()

+

Create a BatchHttpRequest object based on the discovery document.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ new_batch_http_request() +
Create a BatchHttpRequest object based on the discovery document.
+
+                Args:
+                  callback: callable, A callback to be called for each response, of the
+                    form callback(id, response, exception). The first parameter is the
+                    request id, and the second is the deserialized response object. The
+                    third is an apiclient.errors.HttpError exception object if an HTTP
+                    error occurred while processing the request, or None if no error
+                    occurred.
+
+                Returns:
+                  A BatchHttpRequest object based on the discovery document.
+                
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.html b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.html new file mode 100644 index 00000000000..bbba3f3db1c --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.html @@ -0,0 +1,200 @@ + + + +

Firebase App Distribution API . projects . apps

+

Instance Methods

+

+ releases() +

+

Returns the releases Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ getTestConfig(name, x__xgafv=None)

+

Gets configuration for automated tests.

+

+ updateTestConfig(name, body=None, updateMask=None, x__xgafv=None)

+

Updates automated test configuration.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ getTestConfig(name, x__xgafv=None) +
Gets configuration for automated tests.
+
+Args:
+  name: string, Required. The name of the `TestConfig` resource to retrieve. Format: `projects/{project_number}/apps/{app_id}/testConfig` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Configuration for automated tests
+  "name": "A String", # Identifier. The name of the test configuration resource. Format: `projects/{project_number}/apps/{app_id}/testConfig`
+  "roboCrawler": { # Configuration for Robo crawler # Optional. Configuration for Robo crawler
+    "loginCredential": { # Login credential for automated tests # Optional. Login credential for automated tests
+      "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields
+        "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+        "usernameResourceName": "A String", # Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+      },
+      "google": True or False, # Optional. Are these credentials for Google?
+      "password": "A String", # Optional. Password for automated tests
+      "username": "A String", # Optional. Username for automated tests
+    },
+  },
+  "testDevices": [ # Optional. Tests will be run on this list of devices
+    { # A device on which automated tests can be run.
+      "locale": "A String", # Optional. The locale of the device (e.g. "en_US" for US English) during the test.
+      "model": "A String", # Required. The device model.
+      "orientation": "A String", # Optional. The orientation of the device during the test.
+      "version": "A String", # Required. The version of the device (API level on Android).
+    },
+  ],
+}
+
+ +
+ updateTestConfig(name, body=None, updateMask=None, x__xgafv=None) +
Updates automated test configuration.
+
+Args:
+  name: string, Identifier. The name of the test configuration resource. Format: `projects/{project_number}/apps/{app_id}/testConfig` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Configuration for automated tests
+  "name": "A String", # Identifier. The name of the test configuration resource. Format: `projects/{project_number}/apps/{app_id}/testConfig`
+  "roboCrawler": { # Configuration for Robo crawler # Optional. Configuration for Robo crawler
+    "loginCredential": { # Login credential for automated tests # Optional. Login credential for automated tests
+      "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields
+        "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+        "usernameResourceName": "A String", # Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+      },
+      "google": True or False, # Optional. Are these credentials for Google?
+      "password": "A String", # Optional. Password for automated tests
+      "username": "A String", # Optional. Username for automated tests
+    },
+  },
+  "testDevices": [ # Optional. Tests will be run on this list of devices
+    { # A device on which automated tests can be run.
+      "locale": "A String", # Optional. The locale of the device (e.g. "en_US" for US English) during the test.
+      "model": "A String", # Required. The device model.
+      "orientation": "A String", # Optional. The orientation of the device during the test.
+      "version": "A String", # Required. The version of the device (API level on Android).
+    },
+  ],
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Configuration for automated tests
+  "name": "A String", # Identifier. The name of the test configuration resource. Format: `projects/{project_number}/apps/{app_id}/testConfig`
+  "roboCrawler": { # Configuration for Robo crawler # Optional. Configuration for Robo crawler
+    "loginCredential": { # Login credential for automated tests # Optional. Login credential for automated tests
+      "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields
+        "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+        "usernameResourceName": "A String", # Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+      },
+      "google": True or False, # Optional. Are these credentials for Google?
+      "password": "A String", # Optional. Password for automated tests
+      "username": "A String", # Optional. Username for automated tests
+    },
+  },
+  "testDevices": [ # Optional. Tests will be run on this list of devices
+    { # A device on which automated tests can be run.
+      "locale": "A String", # Optional. The locale of the device (e.g. "en_US" for US English) during the test.
+      "model": "A String", # Required. The device model.
+      "orientation": "A String", # Optional. The orientation of the device during the test.
+      "version": "A String", # Required. The version of the device (API level on Android).
+    },
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.html b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.html new file mode 100644 index 00000000000..04e10aef4b3 --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.html @@ -0,0 +1,91 @@ + + + +

Firebase App Distribution API . projects . apps . releases

+

Instance Methods

+

+ tests() +

+

Returns the tests Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html new file mode 100644 index 00000000000..b8b85f01e2f --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html @@ -0,0 +1,340 @@ + + + +

Firebase App Distribution API . projects . apps . releases . tests

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, releaseTestId=None, x__xgafv=None)

+

Run automated test(s) on release.

+

+ get(name, x__xgafv=None)

+

Get results for automated test run on release.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List results for automated tests run on release.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, releaseTestId=None, x__xgafv=None) +
Run automated test(s) on release.
+
+Args:
+  parent: string, Required. The name of the release resource, which is the parent of the test Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The results of running an automated test on a release.
+  "createTime": "A String", # Output only. Timestamp when the test was run.
+  "deviceExecutions": [ # Required. The results of the test on each device.
+    { # The results of running an automated test on a particular device.
+      "appCrash": { # An app crash that occurred during an automated test. # Output only. An app crash, if any occurred during the test.
+        "message": "A String", # Output only. The message associated with the crash.
+        "stackTrace": "A String", # Output only. The raw stack trace.
+      },
+      "crawlGraphUri": "A String", # Output only. A URI to an image of the Robo crawl graph.
+      "device": { # A device on which automated tests can be run. # Required. The device that the test was run on.
+        "locale": "A String", # Optional. The locale of the device (e.g. "en_US" for US English) during the test.
+        "model": "A String", # Required. The device model.
+        "orientation": "A String", # Optional. The orientation of the device during the test.
+        "version": "A String", # Required. The version of the device (API level on Android).
+      },
+      "failedReason": "A String", # Output only. The reason why the test failed.
+      "inconclusiveReason": "A String", # Output only. The reason why the test was inconclusive.
+      "resultsStoragePath": "A String", # Output only. The path to a directory in Cloud Storage that will eventually contain the results for this execution. For example, gs://bucket/Nexus5-18-en-portrait.
+      "roboStats": { # Statistics collected during a Robo test. # Output only. The statistics collected during the Robo test.
+        "actionsPerformed": 42, # Output only. Number of actions that crawler performed.
+        "crawlDuration": "A String", # Output only. Duration of crawl.
+        "distinctVisitedScreens": 42, # Output only. Number of distinct screens visited.
+        "mainActivityCrawlTimedOut": True or False, # Output only. Whether the main activity crawl timed out.
+      },
+      "screenshotUris": [ # Output only. A list of screenshot image URIs taken from the Robo crawl. The file names are numbered by the order in which they were taken.
+        "A String",
+      ],
+      "state": "A String", # Output only. The state of the test.
+      "videoUri": "A String", # Output only. A URI to a video of the test run.
+    },
+  ],
+  "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test.
+    "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields
+      "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+      "usernameResourceName": "A String", # Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+    },
+    "google": True or False, # Optional. Are these credentials for Google?
+    "password": "A String", # Optional. Password for automated tests
+    "username": "A String", # Optional. Username for automated tests
+  },
+  "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`
+}
+
+  releaseTestId: string, Optional. The ID to use for the test, which will become the final component of the test's resource name. This value should be 4-63 characters, and valid characters are /a-z-/. If it is not provided, one will be automatically generated.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The results of running an automated test on a release.
+  "createTime": "A String", # Output only. Timestamp when the test was run.
+  "deviceExecutions": [ # Required. The results of the test on each device.
+    { # The results of running an automated test on a particular device.
+      "appCrash": { # An app crash that occurred during an automated test. # Output only. An app crash, if any occurred during the test.
+        "message": "A String", # Output only. The message associated with the crash.
+        "stackTrace": "A String", # Output only. The raw stack trace.
+      },
+      "crawlGraphUri": "A String", # Output only. A URI to an image of the Robo crawl graph.
+      "device": { # A device on which automated tests can be run. # Required. The device that the test was run on.
+        "locale": "A String", # Optional. The locale of the device (e.g. "en_US" for US English) during the test.
+        "model": "A String", # Required. The device model.
+        "orientation": "A String", # Optional. The orientation of the device during the test.
+        "version": "A String", # Required. The version of the device (API level on Android).
+      },
+      "failedReason": "A String", # Output only. The reason why the test failed.
+      "inconclusiveReason": "A String", # Output only. The reason why the test was inconclusive.
+      "resultsStoragePath": "A String", # Output only. The path to a directory in Cloud Storage that will eventually contain the results for this execution. For example, gs://bucket/Nexus5-18-en-portrait.
+      "roboStats": { # Statistics collected during a Robo test. # Output only. The statistics collected during the Robo test.
+        "actionsPerformed": 42, # Output only. Number of actions that crawler performed.
+        "crawlDuration": "A String", # Output only. Duration of crawl.
+        "distinctVisitedScreens": 42, # Output only. Number of distinct screens visited.
+        "mainActivityCrawlTimedOut": True or False, # Output only. Whether the main activity crawl timed out.
+      },
+      "screenshotUris": [ # Output only. A list of screenshot image URIs taken from the Robo crawl. The file names are numbered by the order in which they were taken.
+        "A String",
+      ],
+      "state": "A String", # Output only. The state of the test.
+      "videoUri": "A String", # Output only. A URI to a video of the test run.
+    },
+  ],
+  "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test.
+    "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields
+      "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+      "usernameResourceName": "A String", # Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+    },
+    "google": True or False, # Optional. Are these credentials for Google?
+    "password": "A String", # Optional. Password for automated tests
+    "username": "A String", # Optional. Username for automated tests
+  },
+  "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`
+}
+
+ +
+ get(name, x__xgafv=None) +
Get results for automated test run on release.
+
+Args:
+  name: string, Required. The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The results of running an automated test on a release.
+  "createTime": "A String", # Output only. Timestamp when the test was run.
+  "deviceExecutions": [ # Required. The results of the test on each device.
+    { # The results of running an automated test on a particular device.
+      "appCrash": { # An app crash that occurred during an automated test. # Output only. An app crash, if any occurred during the test.
+        "message": "A String", # Output only. The message associated with the crash.
+        "stackTrace": "A String", # Output only. The raw stack trace.
+      },
+      "crawlGraphUri": "A String", # Output only. A URI to an image of the Robo crawl graph.
+      "device": { # A device on which automated tests can be run. # Required. The device that the test was run on.
+        "locale": "A String", # Optional. The locale of the device (e.g. "en_US" for US English) during the test.
+        "model": "A String", # Required. The device model.
+        "orientation": "A String", # Optional. The orientation of the device during the test.
+        "version": "A String", # Required. The version of the device (API level on Android).
+      },
+      "failedReason": "A String", # Output only. The reason why the test failed.
+      "inconclusiveReason": "A String", # Output only. The reason why the test was inconclusive.
+      "resultsStoragePath": "A String", # Output only. The path to a directory in Cloud Storage that will eventually contain the results for this execution. For example, gs://bucket/Nexus5-18-en-portrait.
+      "roboStats": { # Statistics collected during a Robo test. # Output only. The statistics collected during the Robo test.
+        "actionsPerformed": 42, # Output only. Number of actions that crawler performed.
+        "crawlDuration": "A String", # Output only. Duration of crawl.
+        "distinctVisitedScreens": 42, # Output only. Number of distinct screens visited.
+        "mainActivityCrawlTimedOut": True or False, # Output only. Whether the main activity crawl timed out.
+      },
+      "screenshotUris": [ # Output only. A list of screenshot image URIs taken from the Robo crawl. The file names are numbered by the order in which they were taken.
+        "A String",
+      ],
+      "state": "A String", # Output only. The state of the test.
+      "videoUri": "A String", # Output only. A URI to a video of the test run.
+    },
+  ],
+  "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test.
+    "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields
+      "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+      "usernameResourceName": "A String", # Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+    },
+    "google": True or False, # Optional. Are these credentials for Google?
+    "password": "A String", # Optional. Password for automated tests
+    "username": "A String", # Optional. Username for automated tests
+  },
+  "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List results for automated tests run on release.
+
+Args:
+  parent: string, Required. The name of the release resource, which is the parent of the tests Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}` (required)
+  pageSize: integer, Optional. The maximum number of tests to return. The service may return fewer than this value.
+  pageToken: string, Optional. A page token, received from a previous `ListReleaseTests` call. Provide this to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for `ListReleaseTests`.
+  "nextPageToken": "A String", # A short-lived token, which can be sent as `pageToken` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "releaseTests": [ # The tests listed.
+    { # The results of running an automated test on a release.
+      "createTime": "A String", # Output only. Timestamp when the test was run.
+      "deviceExecutions": [ # Required. The results of the test on each device.
+        { # The results of running an automated test on a particular device.
+          "appCrash": { # An app crash that occurred during an automated test. # Output only. An app crash, if any occurred during the test.
+            "message": "A String", # Output only. The message associated with the crash.
+            "stackTrace": "A String", # Output only. The raw stack trace.
+          },
+          "crawlGraphUri": "A String", # Output only. A URI to an image of the Robo crawl graph.
+          "device": { # A device on which automated tests can be run. # Required. The device that the test was run on.
+            "locale": "A String", # Optional. The locale of the device (e.g. "en_US" for US English) during the test.
+            "model": "A String", # Required. The device model.
+            "orientation": "A String", # Optional. The orientation of the device during the test.
+            "version": "A String", # Required. The version of the device (API level on Android).
+          },
+          "failedReason": "A String", # Output only. The reason why the test failed.
+          "inconclusiveReason": "A String", # Output only. The reason why the test was inconclusive.
+          "resultsStoragePath": "A String", # Output only. The path to a directory in Cloud Storage that will eventually contain the results for this execution. For example, gs://bucket/Nexus5-18-en-portrait.
+          "roboStats": { # Statistics collected during a Robo test. # Output only. The statistics collected during the Robo test.
+            "actionsPerformed": 42, # Output only. Number of actions that crawler performed.
+            "crawlDuration": "A String", # Output only. Duration of crawl.
+            "distinctVisitedScreens": 42, # Output only. Number of distinct screens visited.
+            "mainActivityCrawlTimedOut": True or False, # Output only. Whether the main activity crawl timed out.
+          },
+          "screenshotUris": [ # Output only. A list of screenshot image URIs taken from the Robo crawl. The file names are numbered by the order in which they were taken.
+            "A String",
+          ],
+          "state": "A String", # Output only. The state of the test.
+          "videoUri": "A String", # Output only. A URI to a video of the test run.
+        },
+      ],
+      "loginCredential": { # Login credential for automated tests # Optional. Input only. Login credentials for the test. Input only.
+        "fieldHints": { # Hints to the crawler for identifying input fields # Optional. Hints to the crawler for identifying input fields
+          "passwordResourceName": "A String", # Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+          "usernameResourceName": "A String", # Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the "foo" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html
+        },
+        "google": True or False, # Optional. Are these credentials for Google?
+        "password": "A String", # Optional. Password for automated tests
+        "username": "A String", # Optional. Username for automated tests
+      },
+      "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/firebaseappdistribution_v1alpha.projects.html b/docs/dyn/firebaseappdistribution_v1alpha.projects.html new file mode 100644 index 00000000000..fdf6d95eb6d --- /dev/null +++ b/docs/dyn/firebaseappdistribution_v1alpha.projects.html @@ -0,0 +1,91 @@ + + + +

Firebase App Distribution API . projects

+

Instance Methods

+

+ apps() +

+

Returns the apps Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/gkebackup_v1.projects.locations.restorePlans.html b/docs/dyn/gkebackup_v1.projects.locations.restorePlans.html index 99db623575f..c3e5f9cd5ab 100644 --- a/docs/dyn/gkebackup_v1.projects.locations.restorePlans.html +++ b/docs/dyn/gkebackup_v1.projects.locations.restorePlans.html @@ -134,7 +134,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Output only. The full name of the RestorePlan resource. Format: `projects/*/locations/*/restorePlans/*`. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Required. Configuration of Restores created via this RestorePlan. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Required. Configuration of Restores created via this RestorePlan. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. @@ -313,7 +313,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Output only. The full name of the RestorePlan resource. Format: `projects/*/locations/*/restorePlans/*`. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Required. Configuration of Restores created via this RestorePlan. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Required. Configuration of Restores created via this RestorePlan. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. @@ -481,7 +481,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Output only. The full name of the RestorePlan resource. Format: `projects/*/locations/*/restorePlans/*`. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Required. Configuration of Restores created via this RestorePlan. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Required. Configuration of Restores created via this RestorePlan. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. @@ -608,7 +608,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # Output only. The full name of the RestorePlan resource. Format: `projects/*/locations/*/restorePlans/*`. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Required. Configuration of Restores created via this RestorePlan. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Required. Configuration of Restores created via this RestorePlan. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. 
diff --git a/docs/dyn/gkebackup_v1.projects.locations.restorePlans.restores.html b/docs/dyn/gkebackup_v1.projects.locations.restorePlans.restores.html index 2ec77e4c575..62465e991d6 100644 --- a/docs/dyn/gkebackup_v1.projects.locations.restorePlans.restores.html +++ b/docs/dyn/gkebackup_v1.projects.locations.restorePlans.restores.html @@ -138,7 +138,7 @@

Method Details

"resourcesExcludedCount": 42, # Output only. Number of resources excluded during the restore execution. "resourcesFailedCount": 42, # Output only. Number of resources that failed to be restored during the restore execution. "resourcesRestoredCount": 42, # Output only. Number of resources restored during the restore execution. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. @@ -322,7 +322,7 @@

Method Details

"resourcesExcludedCount": 42, # Output only. Number of resources excluded during the restore execution. "resourcesFailedCount": 42, # Output only. Number of resources that failed to be restored during the restore execution. "resourcesRestoredCount": 42, # Output only. Number of resources restored during the restore execution. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. @@ -495,7 +495,7 @@

Method Details

"resourcesExcludedCount": 42, # Output only. Number of resources excluded during the restore execution. "resourcesFailedCount": 42, # Output only. Number of resources that failed to be restored during the restore execution. "resourcesRestoredCount": 42, # Output only. Number of resources restored during the restore execution. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. @@ -627,7 +627,7 @@

Method Details

"resourcesExcludedCount": 42, # Output only. Number of resources excluded during the restore execution. "resourcesFailedCount": 42, # Output only. Number of resources that failed to be restored during the restore execution. "resourcesRestoredCount": 42, # Output only. Number of resources restored during the restore execution. - "restoreConfig": { # Configuration of a restore. Next id: 13 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. + "restoreConfig": { # Configuration of a restore. Next id: 14 # Output only. Configuration of the Restore. Inherited from parent RestorePlan's restore_config. "allNamespaces": True or False, # Restore all namespaced resources in the Backup if set to "True". Specifying this field to "False" is an error. "clusterResourceConflictPolicy": "A String", # Optional. Defines the behavior for handling the situation where cluster-scoped resources being restored already exist in the target cluster. This MUST be set to a value other than CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED if cluster_resource_restore_scope is not empty. "clusterResourceRestoreScope": { # Defines the scope of cluster-scoped resources to restore. Some group kinds are not reasonable choices for a restore, and will cause an error if selected here. Any scope selection that would restore "all valid" resources automatically excludes these group kinds. - gkebackup.gke.io/BackupJob - gkebackup.gke.io/RestoreJob - metrics.k8s.io/NodeMetrics - migration.k8s.io/StorageState - migration.k8s.io/StorageVersionMigration - Node - snapshot.storage.k8s.io/VolumeSnapshotContent - storage.k8s.io/CSINode Some group kinds are driven by restore configuration elsewhere, and will cause an error if selected here. - Namespace - PersistentVolume # Optional. Identifies the cluster-scoped resources to restore from the Backup. Not specifying it means NO cluster resource will be restored. 
diff --git a/docs/dyn/gkehub_v1.projects.locations.features.html b/docs/dyn/gkehub_v1.projects.locations.features.html index 171d9b77530..73b8d300980 100644 --- a/docs/dyn/gkehub_v1.projects.locations.features.html +++ b/docs/dyn/gkehub_v1.projects.locations.features.html @@ -138,7 +138,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -288,7 +288,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -433,14 +433,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -533,7 +529,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -925,7 +921,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1075,7 +1071,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1220,14 +1216,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -1320,7 +1312,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1700,7 +1692,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1850,7 +1842,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1995,14 +1987,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -2095,7 +2083,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2431,7 +2419,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2581,7 +2569,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2726,14 +2714,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -2826,7 +2810,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. diff --git a/docs/dyn/gkehub_v1alpha.projects.locations.features.html b/docs/dyn/gkehub_v1alpha.projects.locations.features.html index 01589632866..cb69b2c57c4 100644 --- a/docs/dyn/gkehub_v1alpha.projects.locations.features.html +++ b/docs/dyn/gkehub_v1alpha.projects.locations.features.html @@ -141,7 +141,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -304,7 +304,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -455,7 +455,6 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. @@ -564,7 +563,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1086,7 +1085,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1249,7 +1248,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1400,7 +1399,6 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. @@ -1509,7 +1507,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2019,7 +2017,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2182,7 +2180,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2333,7 +2331,6 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. @@ -2442,7 +2439,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2908,7 +2905,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -3071,7 +3068,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -3222,7 +3219,6 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. @@ -3331,7 +3327,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. diff --git a/docs/dyn/gkehub_v1beta.projects.locations.features.html b/docs/dyn/gkehub_v1beta.projects.locations.features.html index 9e8870c6d34..8f2a5a8473c 100644 --- a/docs/dyn/gkehub_v1beta.projects.locations.features.html +++ b/docs/dyn/gkehub_v1beta.projects.locations.features.html @@ -141,7 +141,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -303,7 +303,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -448,14 +448,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -557,7 +553,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -964,7 +960,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1126,7 +1122,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1271,14 +1267,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -1380,7 +1372,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1775,7 +1767,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -1937,7 +1929,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2082,14 +2074,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -2191,7 +2179,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2542,7 +2530,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2704,7 +2692,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. @@ -2849,14 +2837,10 @@

Method Details

}, }, "clusterupgrade": { # Per-membership state for this feature. # ClusterUpgrade state. - "fleet": "A String", # Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing. "ignored": { # IgnoredMembership represents a membership ignored by the feature. A membership can be ignored because it was manually upgraded to a newer version than RC default. # Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel. "ignoredTime": "A String", # Time when the membership was first set to ignored. "reason": "A String", # Reason why the membership is ignored. }, - "scopes": [ # Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled. - "A String", - ], "upgrades": [ # Actual upgrade state against desired. { # ScopeGKEUpgradeState is a GKEUpgrade and its state per-membership. "status": { # UpgradeStatus provides status information for each upgrade. # Status of the upgrade. @@ -2958,7 +2942,7 @@

Method Details

"syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, - "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled. + "metricsGcpServiceAccountEmail": "A String", # The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. "oci": { # OCI repo configuration for a single cluster # OCI repo configuration for the cluster "gcpServiceAccountEmail": "A String", # The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. "policyDir": "A String", # The absolute path of the directory that contains the local resources. Default: the root directory of the image. diff --git a/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html b/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html index 9f7d99c4414..a0a24860f9d 100644 --- a/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html +++ b/docs/dyn/gkeonprem_v1.projects.locations.vmwareAdminClusters.html @@ -367,6 +367,7 @@

Method Details

"datastore": "A String", # The name of the vCenter datastore for the admin cluster. "folder": "A String", # The name of the vCenter folder for the admin cluster. "resourcePool": "A String", # The name of the vCenter resource pool for the admin cluster. + "storagePolicyName": "A String", # The name of the vCenter storage policy for the user cluster. }, }
@@ -623,6 +624,7 @@

Method Details

"datastore": "A String", # The name of the vCenter datastore for the admin cluster. "folder": "A String", # The name of the vCenter folder for the admin cluster. "resourcePool": "A String", # The name of the vCenter resource pool for the admin cluster. + "storagePolicyName": "A String", # The name of the vCenter storage policy for the user cluster. }, }, ], @@ -842,6 +844,7 @@

Method Details

"datastore": "A String", # The name of the vCenter datastore for the admin cluster. "folder": "A String", # The name of the vCenter folder for the admin cluster. "resourcePool": "A String", # The name of the vCenter resource pool for the admin cluster. + "storagePolicyName": "A String", # The name of the vCenter storage policy for the user cluster. }, } diff --git a/docs/dyn/gkeonprem_v1.projects.locations.vmwareClusters.html b/docs/dyn/gkeonprem_v1.projects.locations.vmwareClusters.html index 631ebc74797..0f924e1a876 100644 --- a/docs/dyn/gkeonprem_v1.projects.locations.vmwareClusters.html +++ b/docs/dyn/gkeonprem_v1.projects.locations.vmwareClusters.html @@ -283,7 +283,7 @@

Method Details

}, ], }, - "vcenterNetwork": "A String", # vcenter_network specifies vCenter network name. Inherited from the admin cluster. + "vcenterNetwork": "A String", # Output only. vcenter_network specifies vCenter network name. Inherited from the admin cluster. }, "onPremVersion": "A String", # Required. The Anthos clusters on the VMware version for your user cluster. "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware user cluster. @@ -616,7 +616,7 @@

Method Details

}, ], }, - "vcenterNetwork": "A String", # vcenter_network specifies vCenter network name. Inherited from the admin cluster. + "vcenterNetwork": "A String", # Output only. vcenter_network specifies vCenter network name. Inherited from the admin cluster. }, "onPremVersion": "A String", # Required. The Anthos clusters on the VMware version for your user cluster. "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware user cluster. @@ -878,7 +878,7 @@

Method Details

}, ], }, - "vcenterNetwork": "A String", # vcenter_network specifies vCenter network name. Inherited from the admin cluster. + "vcenterNetwork": "A String", # Output only. vcenter_network specifies vCenter network name. Inherited from the admin cluster. }, "onPremVersion": "A String", # Required. The Anthos clusters on the VMware version for your user cluster. "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware user cluster. @@ -1102,7 +1102,7 @@

Method Details

}, ], }, - "vcenterNetwork": "A String", # vcenter_network specifies vCenter network name. Inherited from the admin cluster. + "vcenterNetwork": "A String", # Output only. vcenter_network specifies vCenter network name. Inherited from the admin cluster. }, "onPremVersion": "A String", # Required. The Anthos clusters on the VMware version for your user cluster. "reconciling": True or False, # Output only. If set, there are currently changes in flight to the VMware user cluster. diff --git a/docs/dyn/healthcare_v1beta1.projects.locations.datasets.fhirStores.html b/docs/dyn/healthcare_v1beta1.projects.locations.datasets.fhirStores.html index 74b23558063..cc83c15f287 100644 --- a/docs/dyn/healthcare_v1beta1.projects.locations.datasets.fhirStores.html +++ b/docs/dyn/healthcare_v1beta1.projects.locations.datasets.fhirStores.html @@ -297,7 +297,7 @@

Method Details

"accessDeterminationLogConfig": { # Configures consent audit log config for FHIR create, read, update, and delete (CRUD) operations. Cloud audit log for healthcare API must be [enabled](https://cloud.google.com/logging/docs/audit/configure-data-access#config-console-enable). The consent-related logs are included as part of `protoPayload.metadata`. # Optional. Specifies how the server logs the consent-aware requests. If not specified, the `AccessDeterminationLogConfig.LogLevel.MINIMUM` option is used. "logLevel": "A String", # Optional. Controls the amount of detail to include as part of the audit logs. }, - "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers provided using [SMART-on-FHIR](https://cloud.google.com/healthcare/private/docs/how-tos/smart-on-fhir) will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. + "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. "consentHeaderHandling": { # How the server handles the consent header. # Optional. Different options to configure the behaviour of the server when handling the `X-Consent-Scope` header. "profile": "A String", # Optional. Specifies the default server behavior when the header is empty. If not specified, the `ScopeProfile.PERMIT_EMPTY_SCOPE` option is used. }, @@ -583,7 +583,7 @@

Method Details

"accessDeterminationLogConfig": { # Configures consent audit log config for FHIR create, read, update, and delete (CRUD) operations. Cloud audit log for healthcare API must be [enabled](https://cloud.google.com/logging/docs/audit/configure-data-access#config-console-enable). The consent-related logs are included as part of `protoPayload.metadata`. # Optional. Specifies how the server logs the consent-aware requests. If not specified, the `AccessDeterminationLogConfig.LogLevel.MINIMUM` option is used. "logLevel": "A String", # Optional. Controls the amount of detail to include as part of the audit logs. }, - "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers provided using [SMART-on-FHIR](https://cloud.google.com/healthcare/private/docs/how-tos/smart-on-fhir) will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. + "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. "consentHeaderHandling": { # How the server handles the consent header. # Optional. Different options to configure the behaviour of the server when handling the `X-Consent-Scope` header. "profile": "A String", # Optional. Specifies the default server behavior when the header is empty. If not specified, the `ScopeProfile.PERMIT_EMPTY_SCOPE` option is used. }, @@ -1203,7 +1203,7 @@

Method Details

"accessDeterminationLogConfig": { # Configures consent audit log config for FHIR create, read, update, and delete (CRUD) operations. Cloud audit log for healthcare API must be [enabled](https://cloud.google.com/logging/docs/audit/configure-data-access#config-console-enable). The consent-related logs are included as part of `protoPayload.metadata`. # Optional. Specifies how the server logs the consent-aware requests. If not specified, the `AccessDeterminationLogConfig.LogLevel.MINIMUM` option is used. "logLevel": "A String", # Optional. Controls the amount of detail to include as part of the audit logs. }, - "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers provided using [SMART-on-FHIR](https://cloud.google.com/healthcare/private/docs/how-tos/smart-on-fhir) will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. + "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. "consentHeaderHandling": { # How the server handles the consent header. # Optional. Different options to configure the behaviour of the server when handling the `X-Consent-Scope` header. "profile": "A String", # Optional. Specifies the default server behavior when the header is empty. If not specified, the `ScopeProfile.PERMIT_EMPTY_SCOPE` option is used. }, @@ -1619,7 +1619,7 @@

Method Details

"accessDeterminationLogConfig": { # Configures consent audit log config for FHIR create, read, update, and delete (CRUD) operations. Cloud audit log for healthcare API must be [enabled](https://cloud.google.com/logging/docs/audit/configure-data-access#config-console-enable). The consent-related logs are included as part of `protoPayload.metadata`. # Optional. Specifies how the server logs the consent-aware requests. If not specified, the `AccessDeterminationLogConfig.LogLevel.MINIMUM` option is used. "logLevel": "A String", # Optional. Controls the amount of detail to include as part of the audit logs. }, - "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers provided using [SMART-on-FHIR](https://cloud.google.com/healthcare/private/docs/how-tos/smart-on-fhir) will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. + "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. "consentHeaderHandling": { # How the server handles the consent header. # Optional. Different options to configure the behaviour of the server when handling the `X-Consent-Scope` header. "profile": "A String", # Optional. Specifies the default server behavior when the header is empty. If not specified, the `ScopeProfile.PERMIT_EMPTY_SCOPE` option is used. }, @@ -1923,7 +1923,7 @@

Method Details

"accessDeterminationLogConfig": { # Configures consent audit log config for FHIR create, read, update, and delete (CRUD) operations. Cloud audit log for healthcare API must be [enabled](https://cloud.google.com/logging/docs/audit/configure-data-access#config-console-enable). The consent-related logs are included as part of `protoPayload.metadata`. # Optional. Specifies how the server logs the consent-aware requests. If not specified, the `AccessDeterminationLogConfig.LogLevel.MINIMUM` option is used. "logLevel": "A String", # Optional. Controls the amount of detail to include as part of the audit logs. }, - "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers provided using [SMART-on-FHIR](https://cloud.google.com/healthcare/private/docs/how-tos/smart-on-fhir) will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. + "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. "consentHeaderHandling": { # How the server handles the consent header. # Optional. Different options to configure the behaviour of the server when handling the `X-Consent-Scope` header. "profile": "A String", # Optional. Specifies the default server behavior when the header is empty. If not specified, the `ScopeProfile.PERMIT_EMPTY_SCOPE` option is used. }, @@ -2209,7 +2209,7 @@

Method Details

"accessDeterminationLogConfig": { # Configures consent audit log config for FHIR create, read, update, and delete (CRUD) operations. Cloud audit log for healthcare API must be [enabled](https://cloud.google.com/logging/docs/audit/configure-data-access#config-console-enable). The consent-related logs are included as part of `protoPayload.metadata`. # Optional. Specifies how the server logs the consent-aware requests. If not specified, the `AccessDeterminationLogConfig.LogLevel.MINIMUM` option is used. "logLevel": "A String", # Optional. Controls the amount of detail to include as part of the audit logs. }, - "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers provided using [SMART-on-FHIR](https://cloud.google.com/healthcare/private/docs/how-tos/smart-on-fhir) will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. + "accessEnforced": True or False, # Optional. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. "consentHeaderHandling": { # How the server handles the consent header. # Optional. Different options to configure the behaviour of the server when handling the `X-Consent-Scope` header. "profile": "A String", # Optional. Specifies the default server behavior when the header is empty. If not specified, the `ScopeProfile.PERMIT_EMPTY_SCOPE` option is used. 
}, diff --git a/docs/dyn/index.md b/docs/dyn/index.md index e5ff84f86f3..3010a1182fb 100644 --- a/docs/dyn/index.md +++ b/docs/dyn/index.md @@ -126,6 +126,7 @@ ## apphub +* [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/apphub_v1.html) * [v1alpha](http://googleapis.github.io/google-api-python-client/docs/dyn/apphub_v1alpha.html) @@ -570,6 +571,7 @@ ## firebaseappdistribution * [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/firebaseappdistribution_v1.html) +* [v1alpha](http://googleapis.github.io/google-api-python-client/docs/dyn/firebaseappdistribution_v1alpha.html) ## firebasedatabase diff --git a/docs/dyn/logging_v2.billingAccounts.html b/docs/dyn/logging_v2.billingAccounts.html index 3ac77a7b82d..6ea831ef62d 100644 --- a/docs/dyn/logging_v2.billingAccounts.html +++ b/docs/dyn/logging_v2.billingAccounts.html @@ -99,10 +99,10 @@

Instance Methods

Close httplib2 connections.

getCmekSettings(name, x__xgafv=None)

-

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

getSettings(name, x__xgafv=None)

-

Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.

Method Details

close() @@ -111,10 +111,10 @@

Method Details

getCmekSettings(name, x__xgafv=None) -
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -133,10 +133,10 @@ 

Method Details

getSettings(name, x__xgafv=None) -
Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -145,7 +145,7 @@ 

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -161,8 +161,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases includes: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. diff --git a/docs/dyn/logging_v2.folders.html b/docs/dyn/logging_v2.folders.html index d2169083a9e..92108ffcc3f 100644 --- a/docs/dyn/logging_v2.folders.html +++ b/docs/dyn/logging_v2.folders.html @@ -99,13 +99,13 @@

Instance Methods

Close httplib2 connections.

getCmekSettings(name, x__xgafv=None)

-

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

getSettings(name, x__xgafv=None)

-

Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.

updateSettings(name, body=None, updateMask=None, x__xgafv=None)

-

Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.

Method Details

close() @@ -114,10 +114,10 @@

Method Details

getCmekSettings(name, x__xgafv=None) -
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -136,10 +136,10 @@ 

Method Details

getSettings(name, x__xgafv=None) -
Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -148,7 +148,7 @@ 

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -164,8 +164,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases include: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. @@ -174,14 +174,14 @@

Method Details

updateSettings(name, body=None, updateMask=None, x__xgafv=None) -
Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.
 
 Args:
-  name: string, Required. The resource name for the settings to update. "organizations/[ORGANIZATION_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource name for the settings to update. "organizations/[ORGANIZATION_ID]/settings" For example:"organizations/12345/settings" (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Describes the settings associated with a project, folder, organization, billing account, or flexible resource.
+{ # Describes the settings associated with a project, folder, organization, or billing account.
   "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink.
     "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers.
       { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it.
@@ -197,8 +197,8 @@ 

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases include: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. @@ -213,7 +213,7 @@

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -229,8 +229,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases include: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. diff --git a/docs/dyn/logging_v2.organizations.html b/docs/dyn/logging_v2.organizations.html index 338c501dc62..aca5797b5d6 100644 --- a/docs/dyn/logging_v2.organizations.html +++ b/docs/dyn/logging_v2.organizations.html @@ -99,16 +99,16 @@

Instance Methods

Close httplib2 connections.

getCmekSettings(name, x__xgafv=None)

-

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

getSettings(name, x__xgafv=None)

-

Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.

updateCmekSettings(name, body=None, updateMask=None, x__xgafv=None)

Updates the Log Router CMEK settings for the given resource.Note: CMEK for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateCmekSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

updateSettings(name, body=None, updateMask=None, x__xgafv=None)

-

Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.

Method Details

close() @@ -117,10 +117,10 @@

Method Details

getCmekSettings(name, x__xgafv=None) -
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -139,10 +139,10 @@ 

Method Details

getSettings(name, x__xgafv=None) -
Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -151,7 +151,7 @@ 

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -167,8 +167,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases include: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. @@ -210,14 +210,14 @@

Method Details

updateSettings(name, body=None, updateMask=None, x__xgafv=None) -
Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.
 
 Args:
-  name: string, Required. The resource name for the settings to update. "organizations/[ORGANIZATION_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource name for the settings to update. "organizations/[ORGANIZATION_ID]/settings" For example:"organizations/12345/settings" (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Describes the settings associated with a project, folder, organization, billing account, or flexible resource.
+{ # Describes the settings associated with a project, folder, organization, or billing account.
   "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink.
     "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers.
       { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it.
@@ -233,8 +233,8 @@ 

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases includes: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. @@ -249,7 +249,7 @@

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -265,8 +265,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases includes: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. diff --git a/docs/dyn/logging_v2.projects.html b/docs/dyn/logging_v2.projects.html index a6130df82ea..0b355a8f98b 100644 --- a/docs/dyn/logging_v2.projects.html +++ b/docs/dyn/logging_v2.projects.html @@ -104,10 +104,10 @@

Instance Methods

Close httplib2 connections.

getCmekSettings(name, x__xgafv=None)

-

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

getSettings(name, x__xgafv=None)

-

Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.

Method Details

close() @@ -116,10 +116,10 @@

Method Details

getCmekSettings(name, x__xgafv=None) -
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -138,10 +138,10 @@ 

Method Details

getSettings(name, x__xgafv=None) -
Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -150,7 +150,7 @@ 

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -166,8 +166,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases includes: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. diff --git a/docs/dyn/logging_v2.v2.html b/docs/dyn/logging_v2.v2.html index 257bba3c236..ed8c0a95779 100644 --- a/docs/dyn/logging_v2.v2.html +++ b/docs/dyn/logging_v2.v2.html @@ -79,16 +79,16 @@

Instance Methods

Close httplib2 connections.

getCmekSettings(name, x__xgafv=None)

-

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

getSettings(name, x__xgafv=None)

-

Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.

updateCmekSettings(name, body=None, updateMask=None, x__xgafv=None)

Updates the Log Router CMEK settings for the given resource.Note: CMEK for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateCmekSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

updateSettings(name, body=None, updateMask=None, x__xgafv=None)

-

Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.

+

Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.

Method Details

close() @@ -97,10 +97,10 @@

Method Details

getCmekSettings(name, x__xgafv=None) -
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve CMEK settings. "projects/[PROJECT_ID]/cmekSettings" "organizations/[ORGANIZATION_ID]/cmekSettings" "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings" "folders/[FOLDER_ID]/cmekSettings" For example:"organizations/12345/cmekSettings"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -119,10 +119,10 @@ 

Method Details

getSettings(name, x__xgafv=None) -
Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.
 
 Args:
-  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource for which to retrieve settings. "projects/[PROJECT_ID]/settings" "organizations/[ORGANIZATION_ID]/settings" "billingAccounts/[BILLING_ACCOUNT_ID]/settings" "folders/[FOLDER_ID]/settings" For example:"organizations/12345/settings"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -131,7 +131,7 @@ 

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -147,8 +147,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases includes: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. @@ -190,14 +190,14 @@

Method Details

updateSettings(name, body=None, updateMask=None, x__xgafv=None) -
Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.
+  
Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.
 
 Args:
-  name: string, Required. The resource name for the settings to update. "organizations/[ORGANIZATION_ID]/settings" For example:"organizations/12345/settings"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization. (required)
+  name: string, Required. The resource name for the settings to update. "organizations/[ORGANIZATION_ID]/settings" For example:"organizations/12345/settings" (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Describes the settings associated with a project, folder, organization, billing account, or flexible resource.
+{ # Describes the settings associated with a project, folder, organization, or billing account.
   "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink.
     "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers.
       { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it.
@@ -213,8 +213,8 @@ 

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases includes: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. @@ -229,7 +229,7 @@

Method Details

Returns: An object of the form: - { # Describes the settings associated with a project, folder, organization, billing account, or flexible resource. + { # Describes the settings associated with a project, folder, organization, or billing account. "defaultSinkConfig": { # Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders. # Optional. Overrides the built-in configuration for _Default sink. "exclusions": [ # Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers. { # Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it. @@ -245,8 +245,8 @@

Method Details

"mode": "A String", # Required. Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions. }, "disableDefaultSink": True or False, # Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed. - "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. - "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. 
Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsKeyName": "A String", # Optional. The resource name for the configured Cloud KMS key.KMS key name format: "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" For example:"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. + "kmsServiceAccountId": "A String", # Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. "loggingServiceAccountId": "A String", # Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method. "name": "A String", # Output only. The resource name of the settings. "storageLocation": "A String", # Optional. The storage location that Cloud Logging will use to create new resources when a location is needed but not explicitly provided. 
The use cases includes: The location of _Default and _Required log bucket for newly created projects and folders.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets. diff --git a/docs/dyn/metastore_v1.projects.locations.services.backups.html b/docs/dyn/metastore_v1.projects.locations.services.backups.html index 2949afe443f..b67a27455b8 100644 --- a/docs/dyn/metastore_v1.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1.projects.locations.services.backups.html @@ -185,7 +185,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -376,7 +375,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -555,7 +553,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. diff --git a/docs/dyn/metastore_v1.projects.locations.services.html b/docs/dyn/metastore_v1.projects.locations.services.html index e77253d3dae..b901b287283 100644 --- a/docs/dyn/metastore_v1.projects.locations.services.html +++ b/docs/dyn/metastore_v1.projects.locations.services.html @@ -87,6 +87,9 @@

Instance Methods

alterLocation(service, body=None, x__xgafv=None)

Alter metadata resource location. The metadata resource can be a database, table, or partition. This functionality only updates the parent directory for the respective metadata resource and does not transfer any existing data to the new location.

+

+ alterTableProperties(service, body=None, x__xgafv=None)

+

Alter metadata table properties.

close()

Close httplib2 connections.

@@ -173,6 +176,52 @@

Method Details

}
+
+ alterTableProperties(service, body=None, x__xgafv=None) +
Alter metadata table properties.
+
+Args:
+  service: string, Required. The relative resource name of the Dataproc Metastore service that's being used to mutate metadata table properties, in the following format:projects/{project_id}/locations/{location_id}/services/{service_id}. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for DataprocMetastore.AlterTableProperties.
+  "properties": { # A map that describes the desired values to mutate. If update_mask is empty, the properties will not update. Otherwise, the properties only alters the value whose associated paths exist in the update mask
+    "a_key": "A String",
+  },
+  "tableName": "A String", # Required. The name of the table containing the properties you're altering in the following format.databases/{database_id}/tables/{table_id}
+  "updateMask": "A String", # A field mask that specifies the metadata table properties that are overwritten by the update. Fields specified in the update_mask are relative to the resource (not to the full request). A field is overwritten if it is in the mask.For example, given the target properties: properties { a: 1 b: 2 } And an update properties: properties { a: 2 b: 3 c: 4 } then if the field mask is:paths: "properties.b", "properties.c"then the result will be: properties { a: 1 b: 3 c: 4 }
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
close()
Close httplib2 connections.
@@ -251,7 +300,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -476,7 +524,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -646,7 +693,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -819,7 +865,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html index 136aa5aba60..4be301819fb 100644 --- a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html @@ -196,7 +196,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -396,7 +395,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -584,7 +582,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.html b/docs/dyn/metastore_v1alpha.projects.locations.services.html index 93fd61077e8..d2e17ab4b26 100644 --- a/docs/dyn/metastore_v1alpha.projects.locations.services.html +++ b/docs/dyn/metastore_v1alpha.projects.locations.services.html @@ -92,6 +92,9 @@

Instance Methods

alterLocation(service, body=None, x__xgafv=None)

Alter metadata resource location. The metadata resource can be a database, table, or partition. This functionality only updates the parent directory for the respective metadata resource and does not transfer any existing data to the new location.

+

+ alterTableProperties(service, body=None, x__xgafv=None)

+

Alter metadata table properties.

close()

Close httplib2 connections.

@@ -181,6 +184,52 @@

Method Details

}
+
+ alterTableProperties(service, body=None, x__xgafv=None) +
Alter metadata table properties.
+
+Args:
+  service: string, Required. The relative resource name of the Dataproc Metastore service that's being used to mutate metadata table properties, in the following format:projects/{project_id}/locations/{location_id}/services/{service_id}. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for DataprocMetastore.AlterTableProperties.
+  "properties": { # A map that describes the desired values to mutate. If update_mask is empty, the properties will not update. Otherwise, the properties only alters the value whose associated paths exist in the update mask
+    "a_key": "A String",
+  },
+  "tableName": "A String", # Required. The name of the table containing the properties you're altering in the following format.databases/{database_id}/tables/{table_id}
+  "updateMask": "A String", # A field mask that specifies the metadata table properties that are overwritten by the update. Fields specified in the update_mask are relative to the resource (not to the full request). A field is overwritten if it is in the mask.For example, given the target properties: properties { a: 1 b: 2 } And an update properties: properties { a: 2 b: 3 c: 4 } then if the field mask is:paths: "properties.b", "properties.c"then the result will be: properties { a: 1 b: 3 c: 4 }
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
close()
Close httplib2 connections.
@@ -267,7 +316,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -501,7 +549,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -680,7 +727,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -862,7 +908,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html index 74ccbb82a45..2d9756cb133 100644 --- a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html @@ -196,7 +196,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -396,7 +395,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -584,7 +582,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.html b/docs/dyn/metastore_v1beta.projects.locations.services.html index 0c0625ca7a5..78d8ca0460a 100644 --- a/docs/dyn/metastore_v1beta.projects.locations.services.html +++ b/docs/dyn/metastore_v1beta.projects.locations.services.html @@ -92,6 +92,9 @@

Instance Methods

alterLocation(service, body=None, x__xgafv=None)

Alter metadata resource location. The metadata resource can be a database, table, or partition. This functionality only updates the parent directory for the respective metadata resource and does not transfer any existing data to the new location.

+

+ alterTableProperties(service, body=None, x__xgafv=None)

+

Alter metadata table properties.

close()

Close httplib2 connections.

@@ -181,6 +184,52 @@

Method Details

}
+
+ alterTableProperties(service, body=None, x__xgafv=None) +
Alter metadata table properties.
+
+Args:
+  service: string, Required. The relative resource name of the Dataproc Metastore service that's being used to mutate metadata table properties, in the following format:projects/{project_id}/locations/{location_id}/services/{service_id}. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for DataprocMetastore.AlterTableProperties.
+  "properties": { # A map that describes the desired values to mutate. If update_mask is empty, the properties will not update. Otherwise, the properties only alters the value whose associated paths exist in the update mask
+    "a_key": "A String",
+  },
+  "tableName": "A String", # Required. The name of the table containing the properties you're altering in the following format.databases/{database_id}/tables/{table_id}
+  "updateMask": "A String", # A field mask that specifies the metadata table properties that are overwritten by the update. Fields specified in the update_mask are relative to the resource (not to the full request). A field is overwritten if it is in the mask.For example, given the target properties: properties { a: 1 b: 2 } And an update properties: properties { a: 2 b: 3 c: 4 } then if the field mask is:paths: "properties.b", "properties.c"then the result will be: properties { a: 1 b: 3 c: 4 }
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
close()
Close httplib2 connections.
@@ -267,7 +316,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -501,7 +549,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -680,7 +727,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. @@ -862,7 +908,6 @@

Method Details

"restores": [ # Output only. The latest restores of the metastore service. { # The details of a metadata restore operation. "backup": "A String", # Output only. The relative resource name of the metastore service backup to restore from, in the following form:projects/{project_id}/locations/{location_id}/services/{service_id}/backups/{backup_id}. - "backupLocation": "A String", # Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///. "details": "A String", # Output only. The restore details containing the revision of the service to be restored to, in format of JSON. "endTime": "A String", # Output only. The time when the restore ended. "startTime": "A String", # Output only. The time when the restore started. diff --git a/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html b/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html index 1d74dd6f5b5..1bcdc1f76e5 100644 --- a/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html +++ b/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html @@ -145,7 +145,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -196,7 +196,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -468,7 +468,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -519,7 +519,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -732,7 +732,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -783,7 +783,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html index c0bfa68debe..7ba3a75ae7c 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html @@ -161,7 +161,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -236,7 +236,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -545,7 +545,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -620,7 +620,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -870,7 +870,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -945,7 +945,7 @@

Method Details

}, ], }, - "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of memory sizes. + "storageBytesHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of storage sizes. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. diff --git a/docs/dyn/monitoring_v1.projects.dashboards.html b/docs/dyn/monitoring_v1.projects.dashboards.html index b379652988f..54c451c68d8 100644 --- a/docs/dyn/monitoring_v1.projects.dashboards.html +++ b/docs/dyn/monitoring_v1.projects.dashboards.html @@ -343,6 +343,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -856,6 +860,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -1362,6 +1370,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -1870,6 +1882,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -2387,6 +2403,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -2900,6 +2920,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -3406,6 +3430,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -3914,6 +3942,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -4455,6 +4487,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -4968,6 +5004,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -5474,6 +5514,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -5982,6 +6026,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -6509,6 +6557,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -7022,6 +7074,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -7528,6 +7584,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -8036,6 +8096,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -8571,6 +8635,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -9084,6 +9152,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -9590,6 +9662,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -10098,6 +10174,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -10615,6 +10695,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -11128,6 +11212,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -11634,6 +11722,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. @@ -12142,6 +12234,10 @@

Method Details

"unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. }, }, + "sectionHeader": { # A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content. # A widget that defines a section header for easier navigation of the dashboard. + "dividerBelow": True or False, # Whether to insert a divider below the section in the table of contents + "subtitle": "A String", # The subtitle of the section + }, "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. "content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. diff --git a/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html b/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html index 581b1ca3690..5df2d862584 100644 --- a/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html +++ b/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html @@ -140,7 +140,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -257,7 +257,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -339,12 +342,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -420,7 +460,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -528,7 +568,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -645,7 +685,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -727,12 +770,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -808,7 +888,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -907,7 +987,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -1024,7 +1104,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -1106,12 +1189,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -1187,7 +1307,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -1245,7 +1365,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -1362,7 +1482,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -1444,12 +1567,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -1525,7 +1685,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. diff --git a/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html b/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html index ce6c8b33522..ca9a59a1147 100644 --- a/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html +++ b/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html @@ -140,7 +140,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -258,7 +258,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -340,12 +343,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -421,7 +461,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -529,7 +569,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -647,7 +687,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -729,12 +772,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -810,7 +890,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -909,7 +989,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -1027,7 +1107,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -1109,12 +1192,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -1190,7 +1310,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -1248,7 +1368,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. @@ -1366,7 +1486,10 @@

Method Details

"description": "A String", # A description of the step. Usually this is a summary of the state. "drop": { # Details of the final state "drop" and associated resource. # Display information of the final state "drop" and reason. "cause": "A String", # Cause that the packet is dropped. + "destinationIp": "A String", # Destination IP address of the dropped packet (if relevant). + "region": "A String", # Region of the dropped packet (if relevant). "resourceUri": "A String", # URI of the resource that caused the drop. + "sourceIp": "A String", # Source IP address of the dropped packet (if relevant). }, "endpoint": { # For display only. The specification of the endpoints for the test. EndpointInfo is derived from source and destination Endpoint and validated by the backend data plane model. # Display information of the source and destination under analysis. The endpoint information in an intermediate state may differ with the initial input, as it might be modified by state like NAT, or Connection Proxy. "destinationIp": "A String", # Destination IP address. @@ -1448,12 +1571,49 @@

Method Details

"healthCheckUri": "A String", # URI of the health check for the load balancer. Deprecated and no longer populated as different load balancer backends might have different health checks. "loadBalancerType": "A String", # Type of the load balancer. }, + "loadBalancerBackendInfo": { # For display only. Metadata associated with the load balancer backend. # Display information of a specific load balancer backend. + "backendDisplayName": "A String", # Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends. + "backendServiceUri": "A String", # URI of the backend service this backend belongs to (if applicable). + "healthCheckConfigState": "A String", # Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules + "healthCheckUri": "A String", # URI of the health check attached to this backend (if applicable). + "instanceGroupUri": "A String", # URI of the instance group this backend belongs to (if applicable). + "instanceUri": "A String", # URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends. + "networkEndpointGroupUri": "A String", # URI of the network endpoint group this backend belongs to (if applicable). + }, + "nat": { # For display only. Metadata associated with NAT. # Display information of a NAT. + "natGatewayName": "A String", # The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT. + "networkUri": "A String", # URI of the network where NAT translation takes place. + "newDestinationIp": "A String", # Destination IP address after NAT translation. 
+ "newDestinationPort": 42, # Destination port after NAT translation. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address after NAT translation. + "newSourcePort": 42, # Source port after NAT translation. Only valid when protocol is TCP or UDP. + "oldDestinationIp": "A String", # Destination IP address before NAT translation. + "oldDestinationPort": 42, # Destination port before NAT translation. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address before NAT translation. + "oldSourcePort": 42, # Source port before NAT translation. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "routerUri": "A String", # Uri of the Cloud Router. Only valid when type is CLOUD_NAT. + "type": "A String", # Type of NAT. + }, "network": { # For display only. Metadata associated with a Compute Engine network. # Display information of a Google Cloud network. "displayName": "A String", # Name of a Compute Engine network. "matchedIpRange": "A String", # The IP range that matches the test. "uri": "A String", # URI of a Compute Engine network. }, "projectId": "A String", # Project ID that contains the configuration this step is validating. + "proxyConnection": { # For display only. Metadata associated with ProxyConnection. # Display information of a ProxyConnection. + "networkUri": "A String", # URI of the network where connection is proxied. + "newDestinationIp": "A String", # Destination IP address of a new connection. + "newDestinationPort": 42, # Destination port of a new connection. Only valid when protocol is TCP or UDP. + "newSourceIp": "A String", # Source IP address of a new connection. + "newSourcePort": 42, # Source port of a new connection. Only valid when protocol is TCP or UDP. 
+ "oldDestinationIp": "A String", # Destination IP address of an original connection + "oldDestinationPort": 42, # Destination port of an original connection. Only valid when protocol is TCP or UDP. + "oldSourceIp": "A String", # Source IP address of an original connection. + "oldSourcePort": 42, # Source port of an original connection. Only valid when protocol is TCP or UDP. + "protocol": "A String", # IP protocol in string format, for example: "TCP", "UDP", "ICMP". + "subnetUri": "A String", # Uri of proxy subnet. + }, "route": { # For display only. Metadata associated with a Compute Engine route. # Display information of a Compute Engine route. "destIpRange": "A String", # Destination IP range of the route. "destPortRanges": [ # Destination port ranges of the route. Policy based routes only. @@ -1529,7 +1689,7 @@

Method Details

"forwardingRuleTarget": "A String", # Output only. Specifies the type of the target of the forwarding rule. "gkeMasterCluster": "A String", # A cluster URI for [Google Kubernetes Engine master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture). "instance": "A String", # A Compute Engine instance URI. - "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview). + "ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. "network": "A String", # A Compute Engine network URI. diff --git a/docs/dyn/networksecurity_v1beta1.organizations.locations.firewallEndpoints.html b/docs/dyn/networksecurity_v1beta1.organizations.locations.firewallEndpoints.html index 4b76d8ed094..415995bb7ec 100644 --- a/docs/dyn/networksecurity_v1beta1.organizations.locations.firewallEndpoints.html +++ b/docs/dyn/networksecurity_v1beta1.organizations.locations.firewallEndpoints.html @@ -114,13 +114,19 @@

Method Details

"associatedNetworks": [ # Output only. List of networks that are associated with this endpoint in the local zone. This is a projection of the FirewallEndpointAssociations pointing at this endpoint. A network will only appear in this list after traffic routing is fully configured. Format: projects/{project}/global/networks/{name}. "A String", ], + "associations": [ # Output only. List of FirewallEndpointAssociations that are associated to this endpoint. An association will only appear in this list after traffic routing is fully configured. + { # This is a subset of the FirewallEndpointAssociation message, containing fields to be used by the consumer. + "name": "A String", # Output only. The resource name of the FirewallEndpointAssociation. Format: projects/{project}/locations/{location}/firewallEndpointAssociations/{id} + "network": "A String", # Output only. The VPC network associated. Format: projects/{project}/global/networks/{name}. + }, + ], "billingProjectId": "A String", # Optional. Project to bill on endpoint uptime usage. "createTime": "A String", # Output only. Create time stamp "description": "A String", # Optional. Description of the firewall endpoint. Max length 2048 characters. "labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the endpoint. "updateTime": "A String", # Output only. Update time stamp @@ -211,13 +217,19 @@

Method Details

"associatedNetworks": [ # Output only. List of networks that are associated with this endpoint in the local zone. This is a projection of the FirewallEndpointAssociations pointing at this endpoint. A network will only appear in this list after traffic routing is fully configured. Format: projects/{project}/global/networks/{name}. "A String", ], + "associations": [ # Output only. List of FirewallEndpointAssociations that are associated to this endpoint. An association will only appear in this list after traffic routing is fully configured. + { # This is a subset of the FirewallEndpointAssociation message, containing fields to be used by the consumer. + "name": "A String", # Output only. The resource name of the FirewallEndpointAssociation. Format: projects/{project}/locations/{location}/firewallEndpointAssociations/{id} + "network": "A String", # Output only. The VPC network associated. Format: projects/{project}/global/networks/{name}. + }, + ], "billingProjectId": "A String", # Optional. Project to bill on endpoint uptime usage. "createTime": "A String", # Output only. Create time stamp "description": "A String", # Optional. Description of the firewall endpoint. Max length 2048 characters. "labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the endpoint. "updateTime": "A String", # Output only. Update time stamp @@ -248,13 +260,19 @@

Method Details

"associatedNetworks": [ # Output only. List of networks that are associated with this endpoint in the local zone. This is a projection of the FirewallEndpointAssociations pointing at this endpoint. A network will only appear in this list after traffic routing is fully configured. Format: projects/{project}/global/networks/{name}. "A String", ], + "associations": [ # Output only. List of FirewallEndpointAssociations that are associated to this endpoint. An association will only appear in this list after traffic routing is fully configured. + { # This is a subset of the FirewallEndpointAssociation message, containing fields to be used by the consumer. + "name": "A String", # Output only. The resource name of the FirewallEndpointAssociation. Format: projects/{project}/locations/{location}/firewallEndpointAssociations/{id} + "network": "A String", # Output only. The VPC network associated. Format: projects/{project}/global/networks/{name}. + }, + ], "billingProjectId": "A String", # Optional. Project to bill on endpoint uptime usage. "createTime": "A String", # Output only. Create time stamp "description": "A String", # Optional. Description of the firewall endpoint. Max length 2048 characters. "labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the endpoint. "updateTime": "A String", # Output only. Update time stamp @@ -286,7 +304,7 @@

Method Details

Update a single Endpoint.
 
 Args:
-  name: string, Output only. name of resource (required)
+  name: string, Immutable. Identifier. name of resource (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -294,13 +312,19 @@ 

Method Details

"associatedNetworks": [ # Output only. List of networks that are associated with this endpoint in the local zone. This is a projection of the FirewallEndpointAssociations pointing at this endpoint. A network will only appear in this list after traffic routing is fully configured. Format: projects/{project}/global/networks/{name}. "A String", ], + "associations": [ # Output only. List of FirewallEndpointAssociations that are associated to this endpoint. An association will only appear in this list after traffic routing is fully configured. + { # This is a subset of the FirewallEndpointAssociation message, containing fields to be used by the consumer. + "name": "A String", # Output only. The resource name of the FirewallEndpointAssociation. Format: projects/{project}/locations/{location}/firewallEndpointAssociations/{id} + "network": "A String", # Output only. The VPC network associated. Format: projects/{project}/global/networks/{name}. + }, + ], "billingProjectId": "A String", # Optional. Project to bill on endpoint uptime usage. "createTime": "A String", # Output only. Create time stamp "description": "A String", # Optional. Description of the firewall endpoint. Max length 2048 characters. "labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the endpoint. "updateTime": "A String", # Output only. 
Update time stamp diff --git a/docs/dyn/networksecurity_v1beta1.projects.locations.firewallEndpointAssociations.html b/docs/dyn/networksecurity_v1beta1.projects.locations.firewallEndpointAssociations.html index 9a31dfcb961..ed915d50aca 100644 --- a/docs/dyn/networksecurity_v1beta1.projects.locations.firewallEndpointAssociations.html +++ b/docs/dyn/networksecurity_v1beta1.projects.locations.firewallEndpointAssociations.html @@ -116,7 +116,7 @@

Method Details

"labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "network": "A String", # Required. The URL of the network that is being associated. "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the association. @@ -124,7 +124,7 @@

Method Details

"updateTime": "A String", # Output only. Update time stamp } - firewallEndpointAssociationId: string, Required. Id of the requesting object. If auto-generating Id server-side, remove this field and firewall_endpoint_association_id from the method_signature of Create RPC. + firewallEndpointAssociationId: string, Optional. Id of the requesting object. If auto-generating Id server-side, remove this field and firewall_endpoint_association_id from the method_signature of Create RPC. requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000). x__xgafv: string, V1 error format. Allowed values @@ -211,7 +211,7 @@

Method Details

"labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "network": "A String", # Required. The URL of the network that is being associated. "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the association. @@ -246,7 +246,7 @@

Method Details

"labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "network": "A String", # Required. The URL of the network that is being associated. "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the association. @@ -280,7 +280,7 @@

Method Details

Update a single FirewallEndpointAssociation.
 
 Args:
-  name: string, Output only. name of resource (required)
+  name: string, Immutable. Identifier. name of resource (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -290,7 +290,7 @@ 

Method Details

"labels": { # Optional. Labels as key value pairs "a_key": "A String", }, - "name": "A String", # Output only. name of resource + "name": "A String", # Immutable. Identifier. name of resource "network": "A String", # Required. The URL of the network that is being associated. "reconciling": True or False, # Output only. Whether reconciling is in progress, recommended per https://google.aip.dev/128. "state": "A String", # Output only. Current state of the association. diff --git a/docs/dyn/networkservices_v1.projects.locations.gateways.html b/docs/dyn/networkservices_v1.projects.locations.gateways.html index 11f5e4927b2..0141e5f3688 100644 --- a/docs/dyn/networkservices_v1.projects.locations.gateways.html +++ b/docs/dyn/networkservices_v1.projects.locations.gateways.html @@ -119,7 +119,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. +{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -129,6 +129,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, @@ -224,7 +225,7 @@

Method Details

Returns: An object of the form: - { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. + { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -234,6 +235,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, @@ -317,7 +319,7 @@

Method Details

{ # Response returned by the ListGateways method. "gateways": [ # List of Gateway resources. - { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. + { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -327,6 +329,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, @@ -373,7 +376,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. +{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -383,6 +386,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, diff --git a/docs/dyn/networkservices_v1beta1.projects.locations.gateways.html b/docs/dyn/networkservices_v1beta1.projects.locations.gateways.html index 990c3768a6c..61fda0b19f1 100644 --- a/docs/dyn/networkservices_v1beta1.projects.locations.gateways.html +++ b/docs/dyn/networkservices_v1beta1.projects.locations.gateways.html @@ -119,7 +119,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. +{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -129,6 +129,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, @@ -224,7 +225,7 @@

Method Details

Returns: An object of the form: - { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. + { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -234,6 +235,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, @@ -317,7 +319,7 @@

Method Details

{ # Response returned by the ListGateways method. "gateways": [ # List of Gateway resources. - { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. + { # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -327,6 +329,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, @@ -373,7 +376,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. +{ # Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29 "addresses": [ # Optional. Zero or one IPv4 or IPv6 address on which the Gateway will receive the traffic. When no address is provided, an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 for IPv4 and :: for IPv6. "A String", ], @@ -383,6 +386,7 @@

Method Details

"createTime": "A String", # Output only. The timestamp when the resource was created. "description": "A String", # Optional. A free-text description of the resource. Max length 1024 characters. "gatewaySecurityPolicy": "A String", # Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. + "ipVersion": "A String", # Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4. "labels": { # Optional. Set of label tags associated with the Gateway resource. "a_key": "A String", }, diff --git a/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html b/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html index a62286d4064..36eb1ff1ca7 100644 --- a/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html +++ b/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html @@ -123,7 +123,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -131,7 +131,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, @@ -241,7 +241,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -249,7 +249,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, @@ -299,7 +299,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -307,7 +307,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, @@ -366,7 +366,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -374,7 +374,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, diff --git a/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html b/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html index aa58eadf277..83f2a55086b 100644 --- a/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html +++ b/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html @@ -123,7 +123,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -131,7 +131,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, @@ -241,7 +241,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -249,7 +249,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, @@ -299,7 +299,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -307,7 +307,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, @@ -366,7 +366,7 @@

Method Details

"A String", ], "name": "A String", # Required. The name for this extension. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. - "service": "A String", # Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + "service": "A String", # Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`. "supportedEvents": [ # Optional. A set of events during request or response processing for which this extension is called. This field is required for the `LbTrafficExtension` resource. It's not relevant for the `LbRouteExtension` resource. "A String", ], @@ -374,7 +374,7 @@

Method Details

}, ], "matchCondition": { # Conditions under which this chain is invoked for a request. # Required. Conditions under which this chain is invoked for a request. - "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + "celExpression": "A String", # Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). }, "name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, diff --git a/docs/dyn/notebooks_v2.projects.locations.instances.html b/docs/dyn/notebooks_v2.projects.locations.instances.html index 8597fd3985c..c9ed275e18f 100644 --- a/docs/dyn/notebooks_v2.projects.locations.instances.html +++ b/docs/dyn/notebooks_v2.projects.locations.instances.html @@ -253,6 +253,7 @@

Method Details

"name": "A String", # Output only. The name of this notebook instance. Format: `projects/{project_id}/locations/{location}/instances/{instance_id}` "proxyUri": "A String", # Output only. The proxy endpoint that is used to access the Jupyter notebook. "state": "A String", # Output only. The state of this instance. + "thirdPartyProxyUrl": "A String", # Output only. The workforce pools proxy endpoint that is used to access the Jupyter notebook. "updateTime": "A String", # Output only. Instance update time. "upgradeHistory": [ # Output only. The upgrade history of this instance. { # The entry of VM image upgrade history. @@ -481,6 +482,7 @@

Method Details

"name": "A String", # Output only. The name of this notebook instance. Format: `projects/{project_id}/locations/{location}/instances/{instance_id}` "proxyUri": "A String", # Output only. The proxy endpoint that is used to access the Jupyter notebook. "state": "A String", # Output only. The state of this instance. + "thirdPartyProxyUrl": "A String", # Output only. The workforce pools proxy endpoint that is used to access the Jupyter notebook. "updateTime": "A String", # Output only. Instance update time. "upgradeHistory": [ # Output only. The upgrade history of this instance. { # The entry of VM image upgrade history. @@ -670,6 +672,7 @@

Method Details

"name": "A String", # Output only. The name of this notebook instance. Format: `projects/{project_id}/locations/{location}/instances/{instance_id}` "proxyUri": "A String", # Output only. The proxy endpoint that is used to access the Jupyter notebook. "state": "A String", # Output only. The state of this instance. + "thirdPartyProxyUrl": "A String", # Output only. The workforce pools proxy endpoint that is used to access the Jupyter notebook. "updateTime": "A String", # Output only. Instance update time. "upgradeHistory": [ # Output only. The upgrade history of this instance. { # The entry of VM image upgrade history. @@ -798,6 +801,7 @@

Method Details

"name": "A String", # Output only. The name of this notebook instance. Format: `projects/{project_id}/locations/{location}/instances/{instance_id}` "proxyUri": "A String", # Output only. The proxy endpoint that is used to access the Jupyter notebook. "state": "A String", # Output only. The state of this instance. + "thirdPartyProxyUrl": "A String", # Output only. The workforce pools proxy endpoint that is used to access the Jupyter notebook. "updateTime": "A String", # Output only. Instance update time. "upgradeHistory": [ # Output only. The upgrade history of this instance. { # The entry of VM image upgrade history. diff --git a/docs/dyn/orgpolicy_v2.folders.policies.html b/docs/dyn/orgpolicy_v2.folders.policies.html index 24c518794d9..45a10b73b6f 100644 --- a/docs/dyn/orgpolicy_v2.folders.policies.html +++ b/docs/dyn/orgpolicy_v2.folders.policies.html @@ -81,7 +81,7 @@

Instance Methods

create(parent, body=None, x__xgafv=None)

Creates a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint does not exist. Returns a `google.rpc.Status` with `google.rpc.Code.ALREADY_EXISTS` if the policy already exists on the given Google Cloud resource.

- delete(name, x__xgafv=None)

+ delete(name, etag=None, x__xgafv=None)

Deletes a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint or organization policy does not exist.

get(name, x__xgafv=None)

@@ -171,6 +171,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -267,6 +268,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -299,11 +301,12 @@

Method Details

- delete(name, x__xgafv=None) + delete(name, etag=None, x__xgafv=None)
Deletes a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint or organization policy does not exist.
 
 Args:
   name: string, Required. Name of the policy to delete. See the policy entry for naming rules. (required)
+  etag: string, Optional. The current etag of policy. If an etag is provided and does not match the current etag of the policy, deletion will be blocked and an ABORTED error will be returned.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -388,6 +391,7 @@ 

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -491,6 +495,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -599,6 +604,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -713,6 +719,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -810,6 +817,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. 
diff --git a/docs/dyn/orgpolicy_v2.organizations.policies.html b/docs/dyn/orgpolicy_v2.organizations.policies.html index 88b3459e8d5..46702ab807e 100644 --- a/docs/dyn/orgpolicy_v2.organizations.policies.html +++ b/docs/dyn/orgpolicy_v2.organizations.policies.html @@ -81,7 +81,7 @@

Instance Methods

create(parent, body=None, x__xgafv=None)

Creates a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint does not exist. Returns a `google.rpc.Status` with `google.rpc.Code.ALREADY_EXISTS` if the policy already exists on the given Google Cloud resource.

- delete(name, x__xgafv=None)

+ delete(name, etag=None, x__xgafv=None)

Deletes a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint or organization policy does not exist.

get(name, x__xgafv=None)

@@ -171,6 +171,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -267,6 +268,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -299,11 +301,12 @@

Method Details

- delete(name, x__xgafv=None) + delete(name, etag=None, x__xgafv=None)
Deletes a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint or organization policy does not exist.
 
 Args:
   name: string, Required. Name of the policy to delete. See the policy entry for naming rules. (required)
+  etag: string, Optional. The current etag of policy. If an etag is provided and does not match the current etag of the policy, deletion will be blocked and an ABORTED error will be returned.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -388,6 +391,7 @@ 

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -491,6 +495,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -599,6 +604,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -713,6 +719,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -810,6 +817,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. 
diff --git a/docs/dyn/orgpolicy_v2.projects.policies.html b/docs/dyn/orgpolicy_v2.projects.policies.html index 119c1f20977..1c2223aa7fb 100644 --- a/docs/dyn/orgpolicy_v2.projects.policies.html +++ b/docs/dyn/orgpolicy_v2.projects.policies.html @@ -81,7 +81,7 @@

Instance Methods

create(parent, body=None, x__xgafv=None)

Creates a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint does not exist. Returns a `google.rpc.Status` with `google.rpc.Code.ALREADY_EXISTS` if the policy already exists on the given Google Cloud resource.

- delete(name, x__xgafv=None)

+ delete(name, etag=None, x__xgafv=None)

Deletes a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint or organization policy does not exist.

get(name, x__xgafv=None)

@@ -171,6 +171,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -267,6 +268,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -299,11 +301,12 @@

Method Details

- delete(name, x__xgafv=None) + delete(name, etag=None, x__xgafv=None)
Deletes a policy. Returns a `google.rpc.Status` with `google.rpc.Code.NOT_FOUND` if the constraint or organization policy does not exist.
 
 Args:
   name: string, Required. Name of the policy to delete. See the policy entry for naming rules. (required)
+  etag: string, Optional. The current etag of policy. If an etag is provided and does not match the current etag of the policy, deletion will be blocked and an ABORTED error will be returned.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -388,6 +391,7 @@ 

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -491,6 +495,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -599,6 +604,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -713,6 +719,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. @@ -810,6 +817,7 @@

Method Details

], "updateTime": "A String", # Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. }, + "etag": "A String", # Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "name": "A String", # Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. "spec": { # Defines a Google Cloud policy specification which is used to specify constraints for configurations of Google Cloud resources. # Basic information about the Organization Policy. "etag": "A String", # An opaque tag indicating the current version of the policySpec, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policySpec to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. 
diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html index 97e934c944e..e46104ef776 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html @@ -129,6 +129,9 @@

Method Details

{ # Response with a paginated list of error reports matching the search query. "errorReports": [ # Error reports that were found. { # An error report received for an app. These reports are produced by the Android platform code when a (potentially fatal) error condition is detected. Identical reports from many users will be deduplicated and coalesced into a single ErrorReport. **Required permissions**: to access this resource, the calling user needs the _View app information (read-only)_ permission for the app. + "appVersion": { # Representations of an app version. # The app version on which an event in this error report occurred. + "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + }, "deviceModel": { # Summary of a device # A device model on which an event in this error report occurred. "deviceId": { # Identifier of a device. # Identifier of the device. "buildBrand": "A String", # Value of Build.BRAND. @@ -145,6 +148,7 @@

Method Details

}, "reportText": "A String", # Textual representation of the error report. These textual reports are produced by the platform. The reports are then sanitized and filtered to remove any potentially sensitive information. Although their format is fairly stable, they are not entirely meant for machine consumption and we cannot guarantee that there won't be subtle changes to the formatting that may break systems trying to parse information out of the reports. "type": "A String", # Type of the error for which this report was generated. + "vcsInformation": "A String", # Version control system information from BUNDLE-METADATA/version-control-info.textproto or META-INF/version-control-info.textproto of the app bundle or APK, respectively. }, ], "nextPageToken": "A String", # Page token to fetch the next page of reports. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html index d7ac7e6be21..b59c040bd3c 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html @@ -129,6 +129,9 @@

Method Details

{ # Response with a paginated list of error reports matching the search query. "errorReports": [ # Error reports that were found. { # An error report received for an app. These reports are produced by the Android platform code when a (potentially fatal) error condition is detected. Identical reports from many users will be deduplicated and coalesced into a single ErrorReport. **Required permissions**: to access this resource, the calling user needs the _View app information (read-only)_ permission for the app. + "appVersion": { # Representations of an app version. # The app version on which an event in this error report occurred. + "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + }, "deviceModel": { # Summary of a device # A device model on which an event in this error report occurred. "deviceId": { # Identifier of a device. # Identifier of the device. "buildBrand": "A String", # Value of Build.BRAND. @@ -145,6 +148,7 @@

Method Details

}, "reportText": "A String", # Textual representation of the error report. These textual reports are produced by the platform. The reports are then sanitized and filtered to remove any potentially sensitive information. Although their format is fairly stable, they are not entirely meant for machine consumption and we cannot guarantee that there won't be subtle changes to the formatting that may break systems trying to parse information out of the reports. "type": "A String", # Type of the error for which this report was generated. + "vcsInformation": "A String", # Version control system information from BUNDLE-METADATA/version-control-info.textproto or META-INF/version-control-info.textproto of the app bundle or APK, respectively. }, ], "nextPageToken": "A String", # Page token to fetch the next page of reports. diff --git a/docs/dyn/privateca_v1.projects.locations.caPools.html b/docs/dyn/privateca_v1.projects.locations.caPools.html index e36974723d1..402c53d09cf 100644 --- a/docs/dyn/privateca_v1.projects.locations.caPools.html +++ b/docs/dyn/privateca_v1.projects.locations.caPools.html @@ -357,7 +357,7 @@

Method Details

An object of the form: { # Response message for CertificateAuthorityService.FetchCaCerts. - "caCerts": [ # The PEM encoded CA certificate chains of all ACTIVE CertificateAuthority resources in this CaPool. + "caCerts": [ # The PEM encoded CA certificate chains of all Certificate Authorities in this CaPool in the ENABLED, DISABLED, or STAGED states. { "certificates": [ # The certificates that form the CA chain, from leaf to root order. "A String", diff --git a/docs/dyn/pubsub_v1.projects.subscriptions.html b/docs/dyn/pubsub_v1.projects.subscriptions.html index e3eb9cd4aa1..b2eae02151f 100644 --- a/docs/dyn/pubsub_v1.projects.subscriptions.html +++ b/docs/dyn/pubsub_v1.projects.subscriptions.html @@ -170,7 +170,8 @@

Method Details

"dropUnknownFields": True or False, # Optional. When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. "state": "A String", # Output only. An output-only field that indicates whether or not the subscription can receive messages. "table": "A String", # Optional. The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} - "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + "useTableSchema": True or False, # Optional. When true, use the BigQuery table's schema as the columns to write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be enabled at the same time. + "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be enabled at the same time. "writeMetadata": True or False, # Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. }, "cloudStorageConfig": { # Configuration for a Cloud Storage subscription. # Optional. If delivery to Google Cloud Storage is used with this subscription, this field is used to configure it. @@ -241,7 +242,8 @@

Method Details

"dropUnknownFields": True or False, # Optional. When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. "state": "A String", # Output only. An output-only field that indicates whether or not the subscription can receive messages. "table": "A String", # Optional. The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} - "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + "useTableSchema": True or False, # Optional. When true, use the BigQuery table's schema as the columns to write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be enabled at the same time. + "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be enabled at the same time. "writeMetadata": True or False, # Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. }, "cloudStorageConfig": { # Configuration for a Cloud Storage subscription. # Optional. If delivery to Google Cloud Storage is used with this subscription, this field is used to configure it. @@ -355,7 +357,8 @@

Method Details

"dropUnknownFields": True or False, # Optional. When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. "state": "A String", # Output only. An output-only field that indicates whether or not the subscription can receive messages. "table": "A String", # Optional. The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} - "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + "useTableSchema": True or False, # Optional. When true, use the BigQuery table's schema as the columns to write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be enabled at the same time. + "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be enabled at the same time. "writeMetadata": True or False, # Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. }, "cloudStorageConfig": { # Configuration for a Cloud Storage subscription. # Optional. If delivery to Google Cloud Storage is used with this subscription, this field is used to configure it. @@ -473,7 +476,8 @@

Method Details

"dropUnknownFields": True or False, # Optional. When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. "state": "A String", # Output only. An output-only field that indicates whether or not the subscription can receive messages. "table": "A String", # Optional. The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} - "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + "useTableSchema": True or False, # Optional. When true, use the BigQuery table's schema as the columns to write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be enabled at the same time. + "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be enabled at the same time. "writeMetadata": True or False, # Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. }, "cloudStorageConfig": { # Configuration for a Cloud Storage subscription. # Optional. If delivery to Google Cloud Storage is used with this subscription, this field is used to configure it. @@ -630,7 +634,8 @@

Method Details

"dropUnknownFields": True or False, # Optional. When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. "state": "A String", # Output only. An output-only field that indicates whether or not the subscription can receive messages. "table": "A String", # Optional. The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} - "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + "useTableSchema": True or False, # Optional. When true, use the BigQuery table's schema as the columns to write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be enabled at the same time. + "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be enabled at the same time. "writeMetadata": True or False, # Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. }, "cloudStorageConfig": { # Configuration for a Cloud Storage subscription. # Optional. If delivery to Google Cloud Storage is used with this subscription, this field is used to configure it. @@ -703,7 +708,8 @@

Method Details

"dropUnknownFields": True or False, # Optional. When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. "state": "A String", # Output only. An output-only field that indicates whether or not the subscription can receive messages. "table": "A String", # Optional. The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} - "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + "useTableSchema": True or False, # Optional. When true, use the BigQuery table's schema as the columns to write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be enabled at the same time. + "useTopicSchema": True or False, # Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be enabled at the same time. "writeMetadata": True or False, # Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. }, "cloudStorageConfig": { # Configuration for a Cloud Storage subscription. # Optional. If delivery to Google Cloud Storage is used with this subscription, this field is used to configure it. diff --git a/docs/dyn/script_v1.projects.html b/docs/dyn/script_v1.projects.html index 5c3b8588b37..daf68fa4e2a 100644 --- a/docs/dyn/script_v1.projects.html +++ b/docs/dyn/script_v1.projects.html @@ -226,6 +226,7 @@

Method Details

"updateTime": "A String", # Last modified date timestamp. This read-only field is only visible to users who have WRITER permission for the script project. }, ], + "revertFlumeInvoked": True or False, # Set to true if called from revert flume to allow deletion of system generated manifest file while validating content request. This value is false by default. "scriptId": "A String", # The script project's Drive ID. }
@@ -310,6 +311,7 @@

Method Details

"updateTime": "A String", # Last modified date timestamp. This read-only field is only visible to users who have WRITER permission for the script project. }, ], + "revertFlumeInvoked": True or False, # Set to true if called from revert flume to allow deletion of system generated manifest file while validating content request. This value is false by default. "scriptId": "A String", # The script project's Drive ID. } @@ -347,6 +349,7 @@

Method Details

"updateTime": "A String", # Last modified date timestamp. This read-only field is only visible to users who have WRITER permission for the script project. }, ], + "revertFlumeInvoked": True or False, # Set to true if called from revert flume to allow deletion of system generated manifest file while validating content request. This value is false by default. "scriptId": "A String", # The script project's Drive ID. }
diff --git a/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.customModules.html b/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.customModules.html index 55e14c82c0c..f50ce61f4ac 100644 --- a/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.customModules.html +++ b/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.customModules.html @@ -79,22 +79,28 @@

Instance Methods

Close httplib2 connections.

create(parent, body=None, x__xgafv=None)

-

Creates an ETD custom module at the given level. Creating a module has a side-effect of creating modules at all descendants.

+

Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.

delete(name, x__xgafv=None)

-

Deletes an ETD custom module. Deletion at resident level also deletes modules at all descendants. Deletion at any other level is not supported.

+

Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.

get(name, x__xgafv=None)

-

Gets an ETD custom module. Retrieves the module at the given level.

+

Gets an Event Threat Detection custom module.

list(parent, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists ETD custom modules. Retrieve all resident and inherited modules at the given level (no descendants).

+

Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.

+

+ listDescendant(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.

+

+ listDescendant_next()

+

Retrieves the next page of results.

list_next()

Retrieves the next page of results.

patch(name, body=None, updateMask=None, x__xgafv=None)

-

Updates an ETD custom module at the given level. All config fields can be updated when updating the module at resident level. Only enablement state can be updated when updating the module at inherited levels. Updating the module has a side-effect that it updates all descendants that are inherited from this module.

+

Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of "inherited"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.

Method Details

close() @@ -103,14 +109,15 @@

Method Details

create(parent, body=None, x__xgafv=None) -
Creates an ETD custom module at the given level. Creating a module has a side-effect of creating modules at all descendants.
+  
Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.
 
 Args:
   parent: string, Required. The new custom module's parent. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only.
+{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+  "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
   "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
     "a_key": "", # Properties of the object.
   },
@@ -131,7 +138,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -147,7 +155,7 @@

Method Details

delete(name, x__xgafv=None) -
Deletes an ETD custom module. Deletion at resident level also deletes modules at all descendants. Deletion at any other level is not supported.
+  
Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.
 
 Args:
   name: string, Required. Name of the custom module to delete. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
@@ -165,7 +173,7 @@ 

Method Details

get(name, x__xgafv=None) -
Gets an ETD custom module. Retrieves the module at the given level.
+  
Gets an Event Threat Detection custom module.
 
 Args:
   name: string, Required. Name of the custom module to get. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
@@ -177,7 +185,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -193,10 +202,10 @@

Method Details

list(parent, pageSize=None, pageToken=None, x__xgafv=None) -
Lists ETD custom modules. Retrieve all resident and inherited modules at the given level (no descendants).
+  
Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.
 
 Args:
-  parent: string, Required. Name of the parent to list custom modules. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  parent: string, Required. Name of the parent to list custom modules under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
   pageToken: string, A page token, received from a previous `ListEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEventThreatDetectionCustomModules` must match the call that provided the page token.
   x__xgafv: string, V1 error format.
@@ -207,9 +216,10 @@ 

Method Details

Returns: An object of the form: - { # Response for listing EventThreatDetectionCustomModules. + { # Response for listing Event Threat Detection custom modules. "eventThreatDetectionCustomModules": [ # Custom modules belonging to the requested parent. - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -226,6 +236,56 @@

Method Details

}
+
+ listDescendant(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.
+
+Args:
+  parent: string, Required. Name of the parent to list custom modules under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListDescendantEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDescendantEventThreatDetectionCustomModules` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing current and descendant resident Event Threat Detection custom modules.
+  "eventThreatDetectionCustomModules": [ # Custom modules belonging to the requested parent.
+    { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+      "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
+      "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
+        "a_key": "", # Properties of the object.
+      },
+      "description": "A String", # The description for the module.
+      "displayName": "A String", # The human readable name to be displayed for the module.
+      "enablementState": "A String", # The state of enablement for the module at the given level of the hierarchy.
+      "lastEditor": "A String", # Output only. The editor the module was last updated by.
+      "name": "A String", # Immutable. The resource name of the Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}".
+      "type": "A String", # Type for the module. e.g. CONFIGURABLE_BAD_IP.
+      "updateTime": "A String", # Output only. The time the module was last updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ listDescendant_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
list_next()
Retrieves the next page of results.
@@ -242,14 +302,15 @@ 

Method Details

patch(name, body=None, updateMask=None, x__xgafv=None) -
Updates an ETD custom module at the given level. All config fields can be updated when updating the module at resident level. Only enablement state can be updated when updating the module at inherited levels. Updating the module has a side-effect that it updates all descendants that are inherited from this module.
+  
Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of "inherited"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.
 
 Args:
   name: string, Immutable. The resource name of the Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only.
+{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+  "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
   "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
     "a_key": "", # Properties of the object.
   },
@@ -271,7 +332,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, diff --git a/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.effectiveCustomModules.html b/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.effectiveCustomModules.html new file mode 100644 index 00000000000..70c9e653298 --- /dev/null +++ b/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.effectiveCustomModules.html @@ -0,0 +1,168 @@ + + + +

Security Command Center API . folders . eventThreatDetectionSettings . effectiveCustomModules

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets an effective Event Threat Detection custom module at the given level.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets an effective Event Threat Detection custom module at the given level.
+
+Args:
+  name: string, Required. The resource name of the effective Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}". (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An EffectiveEventThreatDetectionCustomModule is the representation of an Event Threat Detection custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablement_state` property in EffectiveEventThreatDetectionCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective `enablement_state` for the module in all child folders or projects is also `enabled`. EffectiveEventThreatDetectionCustomModule is read-only.
+  "config": { # Output only. Config for the effective module.
+    "a_key": "", # Properties of the object.
+  },
+  "description": "A String", # Output only. The description for the module.
+  "displayName": "A String", # Output only. The human readable name to be displayed for the module.
+  "enablementState": "A String", # Output only. The effective state of enablement for the module at the given level of the hierarchy.
+  "name": "A String", # Output only. The resource name of the effective ETD custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}".
+  "type": "A String", # Output only. Type for the module. e.g. CONFIGURABLE_BAD_IP.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.
+
+Args:
+  parent: string, Required. Name of the parent to list custom modules for. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListEffectiveEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEffectiveEventThreatDetectionCustomModules` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing EffectiveEventThreatDetectionCustomModules.
+  "effectiveEventThreatDetectionCustomModules": [ # Effective custom modules belonging to the requested parent.
+    { # An EffectiveEventThreatDetectionCustomModule is the representation of an Event Threat Detection custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablement_state` property in EffectiveEventThreatDetectionCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective `enablement_state` for the module in all child folders or projects is also `enabled`. EffectiveEventThreatDetectionCustomModule is read-only.
+      "config": { # Output only. Config for the effective module.
+        "a_key": "", # Properties of the object.
+      },
+      "description": "A String", # Output only. The description for the module.
+      "displayName": "A String", # Output only. The human readable name to be displayed for the module.
+      "enablementState": "A String", # Output only. The effective state of enablement for the module at the given level of the hierarchy.
+      "name": "A String", # Output only. The resource name of the effective ETD custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}".
+      "type": "A String", # Output only. Type for the module. e.g. CONFIGURABLE_BAD_IP.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.html b/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.html index ab1a44e06e8..84325a9b542 100644 --- a/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.html +++ b/docs/dyn/securitycenter_v1.folders.eventThreatDetectionSettings.html @@ -79,12 +79,17 @@

Instance Methods

Returns the customModules Resource.

+

+ effectiveCustomModules() +

+

Returns the effectiveCustomModules Resource.

+

close()

Close httplib2 connections.

validateCustomModule(parent, body=None, x__xgafv=None)

-

Validates the given Event Threat Detection Custom Module.

+

Validates the given Event Threat Detection custom module.

Method Details

close() @@ -93,14 +98,14 @@

Method Details

validateCustomModule(parent, body=None, x__xgafv=None) -
Validates the given Event Threat Detection Custom Module.
+  
Validates the given Event Threat Detection custom module.
 
 Args:
   parent: string, Required. Resource name of the parent to validate the Custom Module under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Request to validate an EventThreatDetectionCustomModule.
+{ # Request to validate an Event Threat Detection custom module.
   "rawText": "A String", # Required. The raw text of the module's contents. Used to generate error messages.
   "type": "A String", # Required. The type of the module (e.g. CONFIGURABLE_BAD_IP).
 }
@@ -113,7 +118,7 @@ 

Method Details

Returns: An object of the form: - { # Response to validating an Event Threat Detection Custom Module. + { # Response to validating an Event Threat Detection custom module. "errors": { # A list of zero or more errors encountered while validating the uploaded configuration of an Event Threat Detection Custom Module. # A list of errors returned by the validator. If the list is empty, there were no errors. "errors": [ { # An error encountered while validating the uploaded configuration of an Event Threat Detection Custom Module. diff --git a/docs/dyn/securitycenter_v1.folders.html b/docs/dyn/securitycenter_v1.folders.html index 858ea06e05d..24097c887b1 100644 --- a/docs/dyn/securitycenter_v1.folders.html +++ b/docs/dyn/securitycenter_v1.folders.html @@ -84,6 +84,11 @@

Instance Methods

Returns the bigQueryExports Resource.

+

+ eventThreatDetectionSettings() +

+

Returns the eventThreatDetectionSettings Resource.

+

findings()

diff --git a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html index d25ee2e0c52..894c69b3225 100644 --- a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html +++ b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html @@ -470,7 +470,7 @@

Method Details

"updateTime": "A String", # Output only. The time at which the custom module was last updated. } - updateMask: string, The list of fields to update. + updateMask: string, The list of fields to be updated. The only fields that can be updated are `enablement_state` and `custom_config`. If empty or set to the wildcard value `*`, both `enablement_state` and `custom_config` are updated. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.customModules.html b/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.customModules.html index 8ef01278038..20e191522ae 100644 --- a/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.customModules.html +++ b/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.customModules.html @@ -79,22 +79,28 @@

Instance Methods

Close httplib2 connections.

create(parent, body=None, x__xgafv=None)

-

Creates an Event Threat Detection custom module.

+

Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.

delete(name, x__xgafv=None)

-

Deletes an Event Threat Detection custom module.

+

Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.

get(name, x__xgafv=None)

Gets an Event Threat Detection custom module.

list(parent, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists Event Threat Detection custom modules.

+

Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.

+

+ listDescendant(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.

+

+ listDescendant_next()

+

Retrieves the next page of results.

list_next()

Retrieves the next page of results.

patch(name, body=None, updateMask=None, x__xgafv=None)

-

Updates an Event Threat Detection custom module.

+

Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of "inherited"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.

Method Details

close() @@ -103,14 +109,15 @@

Method Details

create(parent, body=None, x__xgafv=None) -
Creates an Event Threat Detection custom module.
+  
Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.
 
 Args:
-  parent: string, Required. The new custom module's parent. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". (required)
+  parent: string, Required. The new custom module's parent. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only.
+{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+  "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
   "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
     "a_key": "", # Properties of the object.
   },
@@ -131,7 +138,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -147,10 +155,10 @@

Method Details

delete(name, x__xgafv=None) -
Deletes an Event Threat Detection custom module.
+  
Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.
 
 Args:
-  name: string, Required. Name of the custom module to delete. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". (required)
+  name: string, Required. Name of the custom module to delete. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -168,7 +176,7 @@ 

Method Details

Gets an Event Threat Detection custom module.
 
 Args:
-  name: string, Required. Name of the custom module to get. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". (required)
+  name: string, Required. Name of the custom module to get. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -177,7 +185,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -193,10 +202,10 @@

Method Details

list(parent, pageSize=None, pageToken=None, x__xgafv=None) -
Lists Event Threat Detection custom modules.
+  
Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.
 
 Args:
-  parent: string, Required. Name of the parent to list custom modules under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". (required)
+  parent: string, Required. Name of the parent to list custom modules under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
   pageToken: string, A page token, received from a previous `ListEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEventThreatDetectionCustomModules` must match the call that provided the page token.
   x__xgafv: string, V1 error format.
@@ -209,7 +218,8 @@ 

Method Details

{ # Response for listing Event Threat Detection custom modules. "eventThreatDetectionCustomModules": [ # Custom modules belonging to the requested parent. - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -226,6 +236,56 @@

Method Details

}
+
+ listDescendant(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.
+
+Args:
+  parent: string, Required. Name of the parent to list custom modules under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListDescendantEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDescendantEventThreatDetectionCustomModules` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing current and descendant resident Event Threat Detection custom modules.
+  "eventThreatDetectionCustomModules": [ # Custom modules belonging to the requested parent.
+    { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+      "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
+      "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
+        "a_key": "", # Properties of the object.
+      },
+      "description": "A String", # The description for the module.
+      "displayName": "A String", # The human readable name to be displayed for the module.
+      "enablementState": "A String", # The state of enablement for the module at the given level of the hierarchy.
+      "lastEditor": "A String", # Output only. The editor the module was last updated by.
+      "name": "A String", # Immutable. The resource name of the Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}".
+      "type": "A String", # Type for the module. e.g. CONFIGURABLE_BAD_IP.
+      "updateTime": "A String", # Output only. The time the module was last updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ listDescendant_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
list_next()
Retrieves the next page of results.
@@ -242,14 +302,15 @@ 

Method Details

patch(name, body=None, updateMask=None, x__xgafv=None) -
Updates an Event Threat Detection custom module.
+  
Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of "inherited"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.
 
 Args:
   name: string, Immutable. The resource name of the Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only.
+{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+  "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
   "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
     "a_key": "", # Properties of the object.
   },
@@ -271,7 +332,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, diff --git a/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.effectiveCustomModules.html b/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.effectiveCustomModules.html new file mode 100644 index 00000000000..56105527016 --- /dev/null +++ b/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.effectiveCustomModules.html @@ -0,0 +1,168 @@ + + + +

Security Command Center API . organizations . eventThreatDetectionSettings . effectiveCustomModules

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets an effective Event Threat Detection custom module at the given level.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets an effective Event Threat Detection custom module at the given level.
+
+Args:
+  name: string, Required. The resource name of the effective Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}". (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An EffectiveEventThreatDetectionCustomModule is the representation of an Event Threat Detection custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablement_state` property in EffectiveEventThreatDetectionCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective `enablement_state` for the module in all child folders or projects is also `enabled`. EffectiveEventThreatDetectionCustomModule is read-only.
+  "config": { # Output only. Config for the effective module.
+    "a_key": "", # Properties of the object.
+  },
+  "description": "A String", # Output only. The description for the module.
+  "displayName": "A String", # Output only. The human readable name to be displayed for the module.
+  "enablementState": "A String", # Output only. The effective state of enablement for the module at the given level of the hierarchy.
+  "name": "A String", # Output only. The resource name of the effective ETD custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}".
+  "type": "A String", # Output only. Type for the module. e.g. CONFIGURABLE_BAD_IP.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.
+
+Args:
+  parent: string, Required. Name of the parent to list custom modules for. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListEffectiveEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEffectiveEventThreatDetectionCustomModules` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing EffectiveEventThreatDetectionCustomModules.
+  "effectiveEventThreatDetectionCustomModules": [ # Effective custom modules belonging to the requested parent.
+    { # An EffectiveEventThreatDetectionCustomModule is the representation of an Event Threat Detection custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablement_state` property in EffectiveEventThreatDetectionCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective `enablement_state` for the module in all child folders or projects is also `enabled`. EffectiveEventThreatDetectionCustomModule is read-only.
+      "config": { # Output only. Config for the effective module.
+        "a_key": "", # Properties of the object.
+      },
+      "description": "A String", # Output only. The description for the module.
+      "displayName": "A String", # Output only. The human readable name to be displayed for the module.
+      "enablementState": "A String", # Output only. The effective state of enablement for the module at the given level of the hierarchy.
+      "name": "A String", # Output only. The resource name of the effective ETD custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}".
+      "type": "A String", # Output only. Type for the module. e.g. CONFIGURABLE_BAD_IP.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.html b/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.html index 2dafb60eab1..707f785b4ac 100644 --- a/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.html +++ b/docs/dyn/securitycenter_v1.organizations.eventThreatDetectionSettings.html @@ -79,6 +79,11 @@

Instance Methods

Returns the customModules Resource.

+

+ effectiveCustomModules() +

+

Returns the effectiveCustomModules Resource.

+

close()

Close httplib2 connections.

@@ -96,7 +101,7 @@

Method Details

Validates the given Event Threat Detection custom module.
 
 Args:
-  parent: string, Required. Resource name of the parent to validate the Custom Module under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". (required)
+  parent: string, Required. Resource name of the parent to validate the Custom Module under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   body: object, The request body.
     The object takes the form of:
 
diff --git a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
index 4ebb637d5c8..cd1809e96c1 100644
--- a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
+++ b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
@@ -470,7 +470,7 @@ 

Method Details

"updateTime": "A String", # Output only. The time at which the custom module was last updated. } - updateMask: string, The list of fields to update. + updateMask: string, The list of fields to be updated. The only fields that can be updated are `enablement_state` and `custom_config`. If empty or set to the wildcard value `*`, both `enablement_state` and `custom_config` are updated. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.customModules.html b/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.customModules.html index 47b82227cca..242871023fb 100644 --- a/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.customModules.html +++ b/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.customModules.html @@ -79,22 +79,28 @@

Instance Methods

Close httplib2 connections.

create(parent, body=None, x__xgafv=None)

-

Creates an ETD custom module at the given level. Creating a module has a side-effect of creating modules at all descendants.

+

Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.

delete(name, x__xgafv=None)

-

Deletes an ETD custom module. Deletion at resident level also deletes modules at all descendants. Deletion at any other level is not supported.

+

Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.

get(name, x__xgafv=None)

-

Gets an ETD custom module. Retrieves the module at the given level.

+

Gets an Event Threat Detection custom module.

list(parent, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists ETD custom modules. Retrieve all resident and inherited modules at the given level (no descendants).

+

Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.

+

+ listDescendant(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.

+

+ listDescendant_next()

+

Retrieves the next page of results.

list_next()

Retrieves the next page of results.

patch(name, body=None, updateMask=None, x__xgafv=None)

-

Updates an ETD custom module at the given level. All config fields can be updated when updating the module at resident level. Only enablement state can be updated when updating the module at inherited levels. Updating the module has a side-effect that it updates all descendants that are inherited from this module.

+

Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of "inherited"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.

Method Details

close() @@ -103,14 +109,15 @@

Method Details

create(parent, body=None, x__xgafv=None) -
Creates an ETD custom module at the given level. Creating a module has a side-effect of creating modules at all descendants.
+  
Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.
 
 Args:
   parent: string, Required. The new custom module's parent. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only.
+{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+  "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
   "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
     "a_key": "", # Properties of the object.
   },
@@ -131,7 +138,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -147,7 +155,7 @@

Method Details

delete(name, x__xgafv=None) -
Deletes an ETD custom module. Deletion at resident level also deletes modules at all descendants. Deletion at any other level is not supported.
+  
Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.
 
 Args:
   name: string, Required. Name of the custom module to delete. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
@@ -165,7 +173,7 @@ 

Method Details

get(name, x__xgafv=None) -
Gets an ETD custom module. Retrieves the module at the given level.
+  
Gets an Event Threat Detection custom module.
 
 Args:
   name: string, Required. Name of the custom module to get. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
@@ -177,7 +185,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -193,10 +202,10 @@

Method Details

list(parent, pageSize=None, pageToken=None, x__xgafv=None) -
Lists ETD custom modules. Retrieve all resident and inherited modules at the given level (no descendants).
+  
Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.
 
 Args:
-  parent: string, Required. Name of the parent to list custom modules. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  parent: string, Required. Name of the parent to list custom modules under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
   pageToken: string, A page token, received from a previous `ListEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEventThreatDetectionCustomModules` must match the call that provided the page token.
   x__xgafv: string, V1 error format.
@@ -207,9 +216,10 @@ 

Method Details

Returns: An object of the form: - { # Response for listing EventThreatDetectionCustomModules. + { # Response for listing Event Threat Detection custom modules. "eventThreatDetectionCustomModules": [ # Custom modules belonging to the requested parent. - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, @@ -226,6 +236,56 @@

Method Details

}
+
+ listDescendant(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.
+
+Args:
+  parent: string, Required. Name of the parent to list custom modules under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListDescendantEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDescendantEventThreatDetectionCustomModules` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing current and descendant resident Event Threat Detection custom modules.
+  "eventThreatDetectionCustomModules": [ # Custom modules belonging to the requested parent.
+    { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+      "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
+      "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
+        "a_key": "", # Properties of the object.
+      },
+      "description": "A String", # The description for the module.
+      "displayName": "A String", # The human readable name to be displayed for the module.
+      "enablementState": "A String", # The state of enablement for the module at the given level of the hierarchy.
+      "lastEditor": "A String", # Output only. The editor the module was last updated by.
+      "name": "A String", # Immutable. The resource name of the Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}".
+      "type": "A String", # Type for the module. e.g. CONFIGURABLE_BAD_IP.
+      "updateTime": "A String", # Output only. The time the module was last updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ listDescendant_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
list_next()
Retrieves the next page of results.
@@ -242,14 +302,15 @@ 

Method Details

patch(name, body=None, updateMask=None, x__xgafv=None) -
Updates an ETD custom module at the given level. All config fields can be updated when updating the module at resident level. Only enablement state can be updated when updating the module at inherited levels. Updating the module has a side-effect that it updates all descendants that are inherited from this module.
+  
Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of "inherited"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.
 
 Args:
   name: string, Immutable. The resource name of the Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/customModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/customModules/{module}". * "projects/{project}/eventThreatDetectionSettings/customModules/{module}". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only.
+{ # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.
+  "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.
   "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module.
     "a_key": "", # Properties of the object.
   },
@@ -271,7 +332,8 @@ 

Method Details

Returns: An object of the form: - { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only. + { # Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects. + "ancestorModule": "A String", # Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name. "config": { # Config for the module. For the resident module, its config value is defined at this level. For the inherited module, its config value is inherited from the ancestor module. "a_key": "", # Properties of the object. }, diff --git a/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.effectiveCustomModules.html b/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.effectiveCustomModules.html new file mode 100644 index 00000000000..dee9038897e --- /dev/null +++ b/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.effectiveCustomModules.html @@ -0,0 +1,168 @@ + + + +

Security Command Center API . projects . eventThreatDetectionSettings . effectiveCustomModules

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets an effective Event Threat Detection custom module at the given level.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets an effective Event Threat Detection custom module at the given level.
+
+Args:
+  name: string, Required. The resource name of the effective Event Threat Detection custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}". (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An EffectiveEventThreatDetectionCustomModule is the representation of an Event Threat Detection custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablement_state` property in EffectiveEventThreatDetectionCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective `enablement_state` for the module in all child folders or projects is also `enabled`. EffectiveEventThreatDetectionCustomModule is read-only.
+  "config": { # Output only. Config for the effective module.
+    "a_key": "", # Properties of the object.
+  },
+  "description": "A String", # Output only. The description for the module.
+  "displayName": "A String", # Output only. The human readable name to be displayed for the module.
+  "enablementState": "A String", # Output only. The effective state of enablement for the module at the given level of the hierarchy.
+  "name": "A String", # Output only. The resource name of the effective ETD custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}".
+  "type": "A String", # Output only. Type for the module. e.g. CONFIGURABLE_BAD_IP.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.
+
+Args:
+  parent: string, Required. Name of the parent to list custom modules for. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
+  pageSize: integer, The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListEffectiveEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEffectiveEventThreatDetectionCustomModules` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing EffectiveEventThreatDetectionCustomModules.
+  "effectiveEventThreatDetectionCustomModules": [ # Effective custom modules belonging to the requested parent.
+    { # An EffectiveEventThreatDetectionCustomModule is the representation of an Event Threat Detection custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablement_state` property in EffectiveEventThreatDetectionCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective `enablement_state` for the module in all child folders or projects is also `enabled`. EffectiveEventThreatDetectionCustomModule is read-only.
+      "config": { # Output only. Config for the effective module.
+        "a_key": "", # Properties of the object.
+      },
+      "description": "A String", # Output only. The description for the module.
+      "displayName": "A String", # Output only. The human readable name to be displayed for the module.
+      "enablementState": "A String", # Output only. The effective state of enablement for the module at the given level of the hierarchy.
+      "name": "A String", # Output only. The resource name of the effective ETD custom module. Its format is: * "organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}". * "projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}".
+      "type": "A String", # Output only. Type for the module. e.g. CONFIGURABLE_BAD_IP.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.html b/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.html index b848a83b9ec..9f6a9590f0a 100644 --- a/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.html +++ b/docs/dyn/securitycenter_v1.projects.eventThreatDetectionSettings.html @@ -79,12 +79,17 @@

Instance Methods

Returns the customModules Resource.

+

+ effectiveCustomModules() +

+

Returns the effectiveCustomModules Resource.

+

close()

Close httplib2 connections.

validateCustomModule(parent, body=None, x__xgafv=None)

-

Validates the given Event Threat Detection Custom Module.

+

Validates the given Event Threat Detection custom module.

Method Details

close() @@ -93,14 +98,14 @@

Method Details

validateCustomModule(parent, body=None, x__xgafv=None) -
Validates the given Event Threat Detection Custom Module.
+  
Validates the given Event Threat Detection custom module.
 
 Args:
   parent: string, Required. Resource name of the parent to validate the Custom Module under. Its format is: * "organizations/{organization}/eventThreatDetectionSettings". * "folders/{folder}/eventThreatDetectionSettings". * "projects/{project}/eventThreatDetectionSettings". (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Request to validate an EventThreatDetectionCustomModule.
+{ # Request to validate an Event Threat Detection custom module.
   "rawText": "A String", # Required. The raw text of the module's contents. Used to generate error messages.
   "type": "A String", # Required. The type of the module (e.g. CONFIGURABLE_BAD_IP).
 }
@@ -113,7 +118,7 @@ 

Method Details

Returns: An object of the form: - { # Response to validating an Event Threat Detection Custom Module. + { # Response to validating an Event Threat Detection custom module. "errors": { # A list of zero or more errors encountered while validating the uploaded configuration of an Event Threat Detection Custom Module. # A list of errors returned by the validator. If the list is empty, there were no errors. "errors": [ { # An error encountered while validating the uploaded configuration of an Event Threat Detection Custom Module. diff --git a/docs/dyn/securitycenter_v1.projects.html b/docs/dyn/securitycenter_v1.projects.html index 5cbea666193..b0a42c1135a 100644 --- a/docs/dyn/securitycenter_v1.projects.html +++ b/docs/dyn/securitycenter_v1.projects.html @@ -84,6 +84,11 @@

Instance Methods

Returns the bigQueryExports Resource.

+

+ eventThreatDetectionSettings() +

+

Returns the eventThreatDetectionSettings Resource.

+

findings()

diff --git a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html index 84b336616bd..bb8b6482360 100644 --- a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html +++ b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html @@ -470,7 +470,7 @@

Method Details

"updateTime": "A String", # Output only. The time at which the custom module was last updated. } - updateMask: string, The list of fields to update. + updateMask: string, The list of fields to be updated. The only fields that can be updated are `enablement_state` and `custom_config`. If empty or set to the wildcard value `*`, both `enablement_state` and `custom_config` are updated. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html index 1646bc9b3f3..5ee6fbf85dc 100644 --- a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html +++ b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html @@ -88,7 +88,7 @@

Instance Methods

Gets details of a single CloneJob.

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists CloneJobs of a given migrating VM.

+

Lists the CloneJobs of a migrating VM. Only the 25 most recent CloneJobs are returned.

list_next()

Retrieves the next page of results.

@@ -388,7 +388,7 @@

Method Details

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists CloneJobs of a given migrating VM.
+  
Lists the CloneJobs of a migrating VM. Only the 25 most recent CloneJobs are returned.
 
 Args:
   parent: string, Required. The parent, which owns this collection of source VMs. (required)
diff --git a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html
index e0b76e1843e..240c950cbbe 100644
--- a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html
+++ b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html
@@ -88,7 +88,7 @@ 

Instance Methods

Gets details of a single CutoverJob.

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists CutoverJobs of a given migrating VM.

+

Lists the CutoverJobs of a migrating VM. Only the 25 most recent CutoverJobs are returned.

list_next()

Retrieves the next page of results.

@@ -608,7 +608,7 @@

Method Details

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists CutoverJobs of a given migrating VM.
+  
Lists the CutoverJobs of a migrating VM. Only the 25 most recent CutoverJobs are returned.
 
 Args:
   parent: string, Required. The parent, which owns this collection of migrating VMs. (required)
diff --git a/docs/dyn/vpcaccess_v1beta1.projects.locations.connectors.html b/docs/dyn/vpcaccess_v1beta1.projects.locations.connectors.html
index 467c5e306dc..228a124c765 100644
--- a/docs/dyn/vpcaccess_v1beta1.projects.locations.connectors.html
+++ b/docs/dyn/vpcaccess_v1beta1.projects.locations.connectors.html
@@ -114,7 +114,9 @@ 

Method Details

"connectedProjects": [ # Output only. List of projects using the connector. "A String", ], + "createTime": "A String", # Output only. The creation time of the connector. "ipCidrRange": "A String", # The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`. + "lastRestartTime": "A String", # Output only. The last restart time of the connector. "machineType": "A String", # Machine type of VM Instance underlying connector. Default is e2-micro "maxInstances": 42, # Maximum value of instances in autoscaling group underlying the connector. "maxThroughput": 42, # Maximum throughput of the connector in Mbps. Refers to the expected throughput when using an `e2-micro` machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by --min-throughput. If both max-throughput and max-instances are provided, max-instances takes precedence over max-throughput. The use of `max-throughput` is discouraged in favor of `max-instances`. @@ -212,7 +214,9 @@

Method Details

"connectedProjects": [ # Output only. List of projects using the connector. "A String", ], + "createTime": "A String", # Output only. The creation time of the connector. "ipCidrRange": "A String", # The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`. + "lastRestartTime": "A String", # Output only. The last restart time of the connector. "machineType": "A String", # Machine type of VM Instance underlying connector. Default is e2-micro "maxInstances": 42, # Maximum value of instances in autoscaling group underlying the connector. "maxThroughput": 42, # Maximum throughput of the connector in Mbps. Refers to the expected throughput when using an `e2-micro` machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by --min-throughput. If both max-throughput and max-instances are provided, max-instances takes precedence over max-throughput. The use of `max-throughput` is discouraged in favor of `max-instances`. @@ -250,7 +254,9 @@

Method Details

"connectedProjects": [ # Output only. List of projects using the connector. "A String", ], + "createTime": "A String", # Output only. The creation time of the connector. "ipCidrRange": "A String", # The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`. + "lastRestartTime": "A String", # Output only. The last restart time of the connector. "machineType": "A String", # Machine type of VM Instance underlying connector. Default is e2-micro "maxInstances": 42, # Maximum value of instances in autoscaling group underlying the connector. "maxThroughput": 42, # Maximum throughput of the connector in Mbps. Refers to the expected throughput when using an `e2-micro` machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by --min-throughput. If both max-throughput and max-instances are provided, max-instances takes precedence over max-throughput. The use of `max-throughput` is discouraged in favor of `max-instances`. @@ -296,7 +302,9 @@

Method Details

"connectedProjects": [ # Output only. List of projects using the connector. "A String", ], + "createTime": "A String", # Output only. The creation time of the connector. "ipCidrRange": "A String", # The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`. + "lastRestartTime": "A String", # Output only. The last restart time of the connector. "machineType": "A String", # Machine type of VM Instance underlying connector. Default is e2-micro "maxInstances": 42, # Maximum value of instances in autoscaling group underlying the connector. "maxThroughput": 42, # Maximum throughput of the connector in Mbps. Refers to the expected throughput when using an `e2-micro` machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by --min-throughput. If both max-throughput and max-instances are provided, max-instances takes precedence over max-throughput. The use of `max-throughput` is discouraged in favor of `max-instances`. diff --git a/docs/dyn/walletobjects_v1.jwt.html b/docs/dyn/walletobjects_v1.jwt.html index 9e8de567141..3bb95776fef 100644 --- a/docs/dyn/walletobjects_v1.jwt.html +++ b/docs/dyn/walletobjects_v1.jwt.html @@ -95,7 +95,7 @@

Method Details

The object takes the form of: { - "jwt": "A String", # A string representing a JWT of the format described at https://developers.google.com/pay/passes/reference/s2w-reference#google-pay-api-for-passes-jwt + "jwt": "A String", # A string representing a JWT of the format described at https://developers.google.com/wallet/reference/rest/v1/Jwt } x__xgafv: string, V1 error format. diff --git a/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html b/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html index 180f0e101ea..d7adde331fc 100644 --- a/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html +++ b/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html @@ -92,7 +92,7 @@

Instance Methods

Close httplib2 connections.

create(parent, body=None, x__xgafv=None)

-

Creates a new execution using the latest revision of the given workflow.

+

Creates a new execution using the latest revision of the given workflow. For more information, see Execute a workflow.

exportData(name, x__xgafv=None)

Returns all metadata stored about an execution, excluding most data that is already accessible using other API methods.

@@ -178,7 +178,7 @@

Method Details

create(parent, body=None, x__xgafv=None) -
Creates a new execution using the latest revision of the given workflow.
+  
Creates a new execution using the latest revision of the given workflow. For more information, see Execute a workflow.
 
 Args:
   parent: string, Required. Name of the workflow for which an execution should be created. Format: projects/{project}/locations/{location}/workflows/{workflow} The latest revision of the workflow will be used. (required)
diff --git a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json
index 472914404bc..bb7d46be092 100644
--- a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json
+++ b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json
@@ -115,7 +115,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://acceleratedmobilepageurl.googleapis.com/",
   "schemas": {
     "AmpUrl": {
diff --git a/googleapiclient/discovery_cache/documents/accessapproval.v1.json b/googleapiclient/discovery_cache/documents/accessapproval.v1.json
index 99e06518d60..50094fdfa5c 100644
--- a/googleapiclient/discovery_cache/documents/accessapproval.v1.json
+++ b/googleapiclient/discovery_cache/documents/accessapproval.v1.json
@@ -913,7 +913,7 @@
       }
     }
   },
-  "revision": "20231201",
+  "revision": "20231208",
   "rootUrl": "https://accessapproval.googleapis.com/",
   "schemas": {
     "AccessApprovalServiceAccount": {
diff --git a/googleapiclient/discovery_cache/documents/acmedns.v1.json b/googleapiclient/discovery_cache/documents/acmedns.v1.json
index 6eac2eaff3e..8a6c4f8ec1c 100644
--- a/googleapiclient/discovery_cache/documents/acmedns.v1.json
+++ b/googleapiclient/discovery_cache/documents/acmedns.v1.json
@@ -146,7 +146,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://acmedns.googleapis.com/",
   "schemas": {
     "AcmeChallengeSet": {
diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json
index b339c4982b4..88d4d6cc405 100644
--- a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json
+++ b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json
@@ -3115,7 +3115,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://adexchangebuyer.googleapis.com/",
   "schemas": {
     "AbsoluteDateRange": {
diff --git a/googleapiclient/discovery_cache/documents/admob.v1.json b/googleapiclient/discovery_cache/documents/admob.v1.json
index e057b859504..57de0e78857 100644
--- a/googleapiclient/discovery_cache/documents/admob.v1.json
+++ b/googleapiclient/discovery_cache/documents/admob.v1.json
@@ -321,7 +321,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://admob.googleapis.com/",
   "schemas": {
     "AdUnit": {
diff --git a/googleapiclient/discovery_cache/documents/admob.v1beta.json b/googleapiclient/discovery_cache/documents/admob.v1beta.json
index cfb3e2fbfc3..c722f7a3c23 100644
--- a/googleapiclient/discovery_cache/documents/admob.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/admob.v1beta.json
@@ -758,7 +758,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://admob.googleapis.com/",
   "schemas": {
     "AdSource": {
diff --git a/googleapiclient/discovery_cache/documents/adsense.v2.json b/googleapiclient/discovery_cache/documents/adsense.v2.json
index 675e46f20f7..4c19fa96d30 100644
--- a/googleapiclient/discovery_cache/documents/adsense.v2.json
+++ b/googleapiclient/discovery_cache/documents/adsense.v2.json
@@ -1844,7 +1844,7 @@
       }
     }
   },
-  "revision": "20231207",
+  "revision": "20231210",
   "rootUrl": "https://adsense.googleapis.com/",
   "schemas": {
     "Account": {
diff --git a/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json b/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json
index b5885b3fe7d..6c684235c08 100644
--- a/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json
+++ b/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json
@@ -357,7 +357,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://advisorynotifications.googleapis.com/",
   "schemas": {
     "GoogleCloudAdvisorynotificationsV1Attachment": {
diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json
index 44f6f4a88c6..034d9fc0a48 100644
--- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json
+++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json
@@ -14684,9 +14684,733 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231211",
   "rootUrl": "https://aiplatform.googleapis.com/",
   "schemas": {
+    "CloudAiLargeModelsVisionEmbedVideoResponse": {
+      "description": "Video embedding response.",
+      "id": "CloudAiLargeModelsVisionEmbedVideoResponse",
+      "properties": {
+        "videoEmbeddings": {
+          "description": "The embedding vector for the video.",
+          "items": {
+            "type": "any"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionFilteredText": {
+      "description": "Details for filtered input text.",
+      "id": "CloudAiLargeModelsVisionFilteredText",
+      "properties": {
+        "category": {
+          "description": "Filtered category",
+          "enum": [
+            "RAI_CATEGORY_UNSPECIFIED",
+            "OBSCENE",
+            "SEXUALLY_EXPLICIT",
+            "IDENTITY_ATTACK",
+            "VIOLENCE_ABUSE",
+            "CSAI",
+            "SPII",
+            "CELEBRITY",
+            "FACE_IMG",
+            "WATERMARK_IMG",
+            "MEMORIZATION_IMG",
+            "CSAI_IMG",
+            "PORN_IMG",
+            "VIOLENCE_IMG",
+            "CHILD_IMG",
+            "TOXIC",
+            "SENSITIVE_WORD",
+            "PERSON_IMG",
+            "ICA_IMG",
+            "SEXUAL_IMG",
+            "IU_IMG",
+            "RACY_IMG",
+            "PEDO_IMG",
+            "DEATH_HARM_TRAGEDY",
+            "HEALTH",
+            "FIREARMS_WEAPONS",
+            "RELIGIOUS_BELIEF",
+            "ILLICIT_DRUGS",
+            "WAR_CONFLICT",
+            "POLITICS",
+            "HATE_SYMBOL_IMG",
+            "CHILD_TEXT",
+            "DANGEROUS_CONTENT",
+            "RECITATION_TEXT"
+          ],
+          "enumDescriptions": [
+            "",
+            "",
+            "Porn",
+            "Hate",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "SafetyAttributes returned but not filtered on",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "End of list",
+            "",
+            "Text category from SafetyCat v3",
+            ""
+          ],
+          "type": "string"
+        },
+        "confidence": {
+          "description": "Confidence level",
+          "enum": [
+            "CONFIDENCE_UNSPECIFIED",
+            "CONFIDENCE_LOW",
+            "CONFIDENCE_MEDIUM",
+            "CONFIDENCE_HIGH"
+          ],
+          "enumDescriptions": [
+            "",
+            "",
+            "",
+            ""
+          ],
+          "type": "string"
+        },
+        "prompt": {
+          "description": "Input prompt",
+          "type": "string"
+        },
+        "score": {
+          "description": "Score for category",
+          "format": "double",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionGenerateVideoResponse": {
+      "description": "Generate video response.",
+      "id": "CloudAiLargeModelsVisionGenerateVideoResponse",
+      "properties": {
+        "generatedSamples": {
+          "description": "The generated samples.",
+          "items": {
+            "$ref": "CloudAiLargeModelsVisionMedia"
+          },
+          "type": "array"
+        },
+        "raiMediaFilteredCount": {
+          "description": "Returns if any videos were filtered due to RAI policies.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "raiMediaFilteredReasons": {
+          "description": "Returns rai failure reasons if any.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "raiTextFilteredReason": {
+          "$ref": "CloudAiLargeModelsVisionFilteredText",
+          "description": "Returns filtered text rai info."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionImage": {
+      "description": "Image.",
+      "id": "CloudAiLargeModelsVisionImage",
+      "properties": {
+        "encoding": {
+          "description": "Image encoding, encoded as \"image/png\" or \"image/jpg\".",
+          "type": "string"
+        },
+        "image": {
+          "description": "Raw bytes.",
+          "format": "byte",
+          "type": "string"
+        },
+        "imageRaiScores": {
+          "$ref": "CloudAiLargeModelsVisionImageRAIScores",
+          "description": "RAI scores for generated image."
+        },
+        "raiInfo": {
+          "$ref": "CloudAiLargeModelsVisionRaiInfo",
+          "description": "RAI info for image"
+        },
+        "semanticFilterResponse": {
+          "$ref": "CloudAiLargeModelsVisionSemanticFilterResponse",
+          "description": "Semantic filter info for image."
+        },
+        "uri": {
+          "description": "Path to another storage (typically Google Cloud Storage).",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionImageRAIScores": {
+      "description": "RAI scores for generated image returned.",
+      "id": "CloudAiLargeModelsVisionImageRAIScores",
+      "properties": {
+        "agileWatermarkDetectionScore": {
+          "description": "Agile watermark score for image.",
+          "format": "double",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionMedia": {
+      "description": "Media.",
+      "id": "CloudAiLargeModelsVisionMedia",
+      "properties": {
+        "image": {
+          "$ref": "CloudAiLargeModelsVisionImage",
+          "description": "Image."
+        },
+        "video": {
+          "$ref": "CloudAiLargeModelsVisionVideo",
+          "description": "Video"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionMediaGenerateContentResponse": {
+      "description": "Generate media content response",
+      "id": "CloudAiLargeModelsVisionMediaGenerateContentResponse",
+      "properties": {
+        "response": {
+          "$ref": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse",
+          "description": "Response to the user's request."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionNamedBoundingBox": {
+      "id": "CloudAiLargeModelsVisionNamedBoundingBox",
+      "properties": {
+        "classes": {
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "entities": {
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "scores": {
+          "items": {
+            "format": "float",
+            "type": "number"
+          },
+          "type": "array"
+        },
+        "x1": {
+          "format": "float",
+          "type": "number"
+        },
+        "x2": {
+          "format": "float",
+          "type": "number"
+        },
+        "y1": {
+          "format": "float",
+          "type": "number"
+        },
+        "y2": {
+          "format": "float",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionRaiInfo": {
+      "id": "CloudAiLargeModelsVisionRaiInfo",
+      "properties": {
+        "raiCategories": {
+          "description": "List of rai categories' information to return",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "scores": {
+          "description": "List of rai scores mapping to the rai categories. Rounded to 1 decimal place.",
+          "items": {
+            "format": "float",
+            "type": "number"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionReasonVideoResponse": {
+      "description": "Video reasoning response.",
+      "id": "CloudAiLargeModelsVisionReasonVideoResponse",
+      "properties": {
+        "responses": {
+          "description": "Generated text responses. The generated responses for different segments within the same video.",
+          "items": {
+            "$ref": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionReasonVideoResponseTextResponse": {
+      "description": "Contains text that is the response of the video captioning.",
+      "id": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse",
+      "properties": {
+        "relativeTemporalPartition": {
+          "$ref": "CloudAiLargeModelsVisionRelativeTemporalPartition",
+          "description": "Partition of the caption's video in time. This field is intended for video captioning. To represent the start time and end time of the caption's video."
+        },
+        "text": {
+          "description": "Text information",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionRelativeTemporalPartition": {
+      "description": "For ease of use, assume that the start_offset is inclusive and the end_offset is exclusive. In mathematical terms, the partition would be written as [start_offset, end_offset).",
+      "id": "CloudAiLargeModelsVisionRelativeTemporalPartition",
+      "properties": {
+        "endOffset": {
+          "description": "End time offset of the partition.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "startOffset": {
+          "description": "Start time offset of the partition.",
+          "format": "google-duration",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionSemanticFilterResponse": {
+      "id": "CloudAiLargeModelsVisionSemanticFilterResponse",
+      "properties": {
+        "namedBoundingBoxes": {
+          "description": "Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates.",
+          "items": {
+            "$ref": "CloudAiLargeModelsVisionNamedBoundingBox"
+          },
+          "type": "array"
+        },
+        "passedSemanticFilter": {
+          "description": "This response is added when semantic filter config is turned on in EditConfig. It reports if this image is passed semantic filter response. If passed_semantic_filter is false, the bounding box information will be populated for user to check what caused the semantic filter to fail.",
+          "type": "boolean"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionVideo": {
+      "description": "Video",
+      "id": "CloudAiLargeModelsVisionVideo",
+      "properties": {
+        "uri": {
+          "description": "Path to another storage (typically Google Cloud Storage).",
+          "type": "string"
+        },
+        "video": {
+          "description": "Raw bytes.",
+          "format": "byte",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceCandidate": {
+      "id": "CloudAiNlLlmProtoServiceCandidate",
+      "properties": {
+        "citationMetadata": {
+          "$ref": "CloudAiNlLlmProtoServiceCitationMetadata",
+          "description": "Source attribution of the generated content."
+        },
+        "content": {
+          "$ref": "CloudAiNlLlmProtoServiceContent",
+          "description": "Content of the candidate."
+        },
+        "finishMessage": {
+          "description": "A string that describes the filtering behavior in more detail. Only filled when reason is set.",
+          "type": "string"
+        },
+        "finishReason": {
+          "description": "The reason why the model stopped generating tokens.",
+          "enum": [
+            "FINISH_REASON_UNSPECIFIED",
+            "FINISH_REASON_STOP",
+            "FINISH_REASON_MAX_TOKENS",
+            "FINISH_REASON_SAFETY",
+            "FINISH_REASON_RECITATION",
+            "FINISH_REASON_OTHER"
+          ],
+          "enumDescriptions": [
+            "The finish reason is unspecified.",
+            "Natural stop point of the model or provided stop sequence.",
+            "The maximum number of tokens as specified in the request was reached.",
+            "The token generation was stopped as the response was flagged for safety reasons. NOTE: When streaming the Candidate.content will be empty if content filters blocked the output.",
+            "The token generation was stopped as the response was flagged for unauthorized citations.",
+            "All other reasons that stopped the token generation."
+          ],
+          "type": "string"
+        },
+        "index": {
+          "description": "Index of the candidate.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "safetyRatings": {
+          "description": "Safety ratings of the generated content.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceSafetyRating"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceCitation": {
+      "description": "Source attributions for content.",
+      "id": "CloudAiNlLlmProtoServiceCitation",
+      "properties": {
+        "endIndex": {
+          "description": "End index into the content.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "license": {
+          "description": "License of the attribution.",
+          "type": "string"
+        },
+        "publicationDate": {
+          "$ref": "GoogleTypeDate",
+          "description": "Publication date of the attribution."
+        },
+        "startIndex": {
+          "description": "Start index into the content.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "title": {
+          "description": "Title of the attribution.",
+          "type": "string"
+        },
+        "uri": {
+          "description": "Url reference of the attribution.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceCitationMetadata": {
+      "description": "A collection of source attributions for a piece of content.",
+      "id": "CloudAiNlLlmProtoServiceCitationMetadata",
+      "properties": {
+        "citations": {
+          "description": "List of citations.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceCitation"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceContent": {
+      "description": "The content of a single message from a participant.",
+      "id": "CloudAiNlLlmProtoServiceContent",
+      "properties": {
+        "parts": {
+          "description": "The parts of the message.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServicePart"
+          },
+          "type": "array"
+        },
+        "role": {
+          "description": "The role of the current conversation participant.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceFunctionCall": {
+      "description": "Function call details.",
+      "id": "CloudAiNlLlmProtoServiceFunctionCall",
+      "properties": {
+        "args": {
+          "additionalProperties": {
+            "description": "Properties of the object.",
+            "type": "any"
+          },
+          "description": "The function parameters and values in JSON format.",
+          "type": "object"
+        },
+        "name": {
+          "description": "Required. The name of the function to call.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceFunctionResponse": {
+      "description": "Function response details.",
+      "id": "CloudAiNlLlmProtoServiceFunctionResponse",
+      "properties": {
+        "name": {
+          "description": "Required. The name of the function to call.",
+          "type": "string"
+        },
+        "response": {
+          "additionalProperties": {
+            "description": "Properties of the object.",
+            "type": "any"
+          },
+          "description": "Required. The function response in JSON object format.",
+          "type": "object"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceGenerateMultiModalResponse": {
+      "id": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse",
+      "properties": {
+        "candidates": {
+          "description": "Possible candidate responses to the conversation up until this point.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceCandidate"
+          },
+          "type": "array"
+        },
+        "promptFeedback": {
+          "$ref": "CloudAiNlLlmProtoServicePromptFeedback",
+          "description": "Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations."
+        },
+        "reportingMetrics": {
+          "$ref": "IntelligenceCloudAutomlXpsReportingMetrics",
+          "description": "Billable prediction metrics."
+        },
+        "usageMetadata": {
+          "$ref": "CloudAiNlLlmProtoServiceUsageMetadata",
+          "description": "Usage metadata about the response(s)."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePart": {
+      "description": "A single part of a message.",
+      "id": "CloudAiNlLlmProtoServicePart",
+      "properties": {
+        "fileData": {
+          "$ref": "CloudAiNlLlmProtoServicePartFileData",
+          "description": "URI-based data."
+        },
+        "functionCall": {
+          "$ref": "CloudAiNlLlmProtoServiceFunctionCall",
+          "description": "Function call data."
+        },
+        "functionResponse": {
+          "$ref": "CloudAiNlLlmProtoServiceFunctionResponse",
+          "description": "Function response data."
+        },
+        "inlineData": {
+          "$ref": "CloudAiNlLlmProtoServicePartBlob",
+          "description": "Inline bytes data"
+        },
+        "text": {
+          "description": "Text input.",
+          "type": "string"
+        },
+        "videoMetadata": {
+          "$ref": "CloudAiNlLlmProtoServicePartVideoMetadata",
+          "description": "Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePartBlob": {
+      "description": "Represents arbitrary blob data input.",
+      "id": "CloudAiNlLlmProtoServicePartBlob",
+      "properties": {
+        "data": {
+          "description": "Inline data.",
+          "format": "byte",
+          "type": "string"
+        },
+        "mimeType": {
+          "description": "The mime type corresponding to this input.",
+          "type": "string"
+        },
+        "originalFileData": {
+          "$ref": "CloudAiNlLlmProtoServicePartFileData",
+          "description": "Original file data where the blob comes from."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePartFileData": {
+      "description": "Represents file data.",
+      "id": "CloudAiNlLlmProtoServicePartFileData",
+      "properties": {
+        "fileUri": {
+          "description": "Inline data.",
+          "type": "string"
+        },
+        "mimeType": {
+          "description": "The mime type corresponding to this input.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePartVideoMetadata": {
+      "description": "Metadata describes the input video content.",
+      "id": "CloudAiNlLlmProtoServicePartVideoMetadata",
+      "properties": {
+        "endOffset": {
+          "description": "The end offset of the video.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "startOffset": {
+          "description": "The start offset of the video.",
+          "format": "google-duration",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePromptFeedback": {
+      "description": "Content filter results for a prompt sent in the request.",
+      "id": "CloudAiNlLlmProtoServicePromptFeedback",
+      "properties": {
+        "blockReason": {
+          "description": "Blocked reason.",
+          "enum": [
+            "BLOCKED_REASON_UNSPECIFIED",
+            "SAFETY",
+            "OTHER"
+          ],
+          "enumDescriptions": [
+            "Unspecified blocked reason.",
+            "Candidates blocked due to safety.",
+            "Candidates blocked due to other reason."
+          ],
+          "type": "string"
+        },
+        "blockReasonMessage": {
+          "description": "A readable block reason message.",
+          "type": "string"
+        },
+        "safetyRatings": {
+          "description": "Safety ratings.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceSafetyRating"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceSafetyRating": {
+      "description": "Safety rating corresponding to the generated content.",
+      "id": "CloudAiNlLlmProtoServiceSafetyRating",
+      "properties": {
+        "blocked": {
+          "description": "Indicates whether the content was filtered out because of this rating.",
+          "type": "boolean"
+        },
+        "category": {
+          "description": "Harm category.",
+          "enum": [
+            "HARM_CATEGORY_UNSPECIFIED",
+            "HARM_CATEGORY_HATE_SPEECH",
+            "HARM_CATEGORY_DANGEROUS_CONTENT",
+            "HARM_CATEGORY_HARASSMENT",
+            "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+          ],
+          "enumDescriptions": [
+            "The harm category is unspecified.",
+            "The harm category is hate speech.",
+            "The harm category is dangerous content.",
+            "The harm category is harassment.",
+            "The harm category is sexually explicit."
+          ],
+          "type": "string"
+        },
+        "probability": {
+          "description": "Harm probability levels in the content.",
+          "enum": [
+            "HARM_PROBABILITY_UNSPECIFIED",
+            "NEGLIGIBLE",
+            "LOW",
+            "MEDIUM",
+            "HIGH"
+          ],
+          "enumDescriptions": [
+            "Harm probability unspecified.",
+            "Negligible level of harm.",
+            "Low level of harm.",
+            "Medium level of harm.",
+            "High level of harm."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceUsageMetadata": {
+      "description": "Usage metadata about response(s).",
+      "id": "CloudAiNlLlmProtoServiceUsageMetadata",
+      "properties": {
+        "candidatesTokenCount": {
+          "description": "Number of tokens in the response(s).",
+          "format": "int32",
+          "type": "integer"
+        },
+        "promptTokenCount": {
+          "description": "Number of tokens in the request.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "totalTokenCount": {
+          "format": "int32",
+          "type": "integer"
+        }
+      },
+      "type": "object"
+    },
     "GoogleApiHttpBody": {
       "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.",
       "id": "GoogleApiHttpBody",
@@ -15531,14 +16255,14 @@
       "id": "GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig",
       "properties": {
         "excludedFields": {
-          "description": "Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.",
+          "description": "Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.",
           "items": {
             "type": "string"
           },
           "type": "array"
         },
         "includedFields": {
-          "description": "Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.",
+          "description": "Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.",
           "items": {
             "type": "string"
           },
@@ -15721,16 +16445,16 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1Blob": {
-      "description": "Content blob.",
+      "description": "Raw media bytes. Text should not be sent as raw bytes, use the 'text' field.",
       "id": "GoogleCloudAiplatformV1Blob",
       "properties": {
         "data": {
-          "description": "Required. Data.",
+          "description": "Required. Raw bytes for media formats.",
           "format": "byte",
           "type": "string"
         },
         "mimeType": {
-          "description": "Required. Mime type of the data.",
+          "description": "Required. The IANA standard MIME type of the source data.",
           "type": "string"
         }
       },
@@ -15962,18 +16686,18 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1Content": {
-      "description": "A single turn in a conversation with the model.",
+      "description": "The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn.",
       "id": "GoogleCloudAiplatformV1Content",
       "properties": {
         "parts": {
-          "description": "Required. Ordered parts that make up a message. Parts may have different MIME types.",
+          "description": "Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types.",
           "items": {
             "$ref": "GoogleCloudAiplatformV1Part"
           },
           "type": "array"
         },
         "role": {
-          "description": "Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model.",
+          "description": "Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset.",
           "type": "string"
         }
       },
@@ -16566,6 +17290,13 @@
           "description": "Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`",
           "type": "string"
         },
+        "models": {
+          "description": "Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the \"default\" version will be returned. The \"default\" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
         "network": {
           "description": "Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.",
           "type": "string"
@@ -18423,7 +19154,7 @@
       "id": "GoogleCloudAiplatformV1ExportDataConfig",
       "properties": {
         "annotationSchemaUri": {
-          "description": "Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri.",
+          "description": "The Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri.",
           "type": "string"
         },
         "annotationsFilter": {
@@ -18455,7 +19186,7 @@
           "description": "The Google Cloud Storage location where the output is to be written to. In the given directory a new directory will be created with name: `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. Inside that directory, annotations with the same schema will be grouped into sub directories which are named with the corresponding annotations' schema title. Inside these sub directories, a schema.yaml will be created to describe the output format."
         },
         "savedQueryId": {
-          "description": "Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type.",
+          "description": "The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type.",
           "type": "string"
         }
       },
@@ -19589,7 +20320,7 @@
           "type": "string"
         },
         "mimeType": {
-          "description": "Required. Mime type of the data.",
+          "description": "Required. The IANA standard MIME type of the source data.",
           "type": "string"
         }
       },
@@ -19737,44 +20468,6 @@
       },
       "type": "object"
     },
-    "GoogleCloudAiplatformV1FunctionCall": {
-      "description": "A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values.",
-      "id": "GoogleCloudAiplatformV1FunctionCall",
-      "properties": {
-        "args": {
-          "additionalProperties": {
-            "description": "Properties of the object.",
-            "type": "any"
-          },
-          "description": "Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.",
-          "type": "object"
-        },
-        "name": {
-          "description": "Required. The name of the function to call. Matches [FunctionDeclaration.name].",
-          "type": "string"
-        }
-      },
-      "type": "object"
-    },
-    "GoogleCloudAiplatformV1FunctionResponse": {
-      "description": "The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction.",
-      "id": "GoogleCloudAiplatformV1FunctionResponse",
-      "properties": {
-        "name": {
-          "description": "Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name].",
-          "type": "string"
-        },
-        "response": {
-          "additionalProperties": {
-            "description": "Properties of the object.",
-            "type": "any"
-          },
-          "description": "Required. The function response in JSON object format.",
-          "type": "object"
-        }
-      },
-      "type": "object"
-    },
     "GoogleCloudAiplatformV1GcsDestination": {
       "description": "The Google Cloud Storage location where the output is to be written to.",
       "id": "GoogleCloudAiplatformV1GcsDestination",
@@ -22956,7 +23649,8 @@
             "CUSTOM",
             "BQML",
             "MODEL_GARDEN",
-            "GENIE"
+            "GENIE",
+            "CUSTOM_TEXT_EMBEDDING"
           ],
           "enumDescriptions": [
             "Should not be used.",
@@ -22964,7 +23658,8 @@
             "The Model is uploaded by user or custom training pipeline.",
             "The Model is registered and sync'ed from BigQuery ML.",
             "The Model is saved or tuned from Model Garden.",
-            "The Model is saved or tuned from Genie."
+            "The Model is saved or tuned from Genie.",
+            "The Model is uploaded by text embedding finetuning pipeline."
           ],
           "type": "string"
         }
@@ -23769,21 +24464,13 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1Part": {
-      "description": "Content part.",
+      "description": "A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.",
       "id": "GoogleCloudAiplatformV1Part",
       "properties": {
         "fileData": {
           "$ref": "GoogleCloudAiplatformV1FileData",
           "description": "Optional. URI based data."
         },
-        "functionCall": {
-          "$ref": "GoogleCloudAiplatformV1FunctionCall",
-          "description": "Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values."
-        },
-        "functionResponse": {
-          "$ref": "GoogleCloudAiplatformV1FunctionResponse",
-          "description": "Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model."
-        },
         "inlineData": {
           "$ref": "GoogleCloudAiplatformV1Blob",
           "description": "Optional. Inlined bytes data."
@@ -24596,6 +25283,20 @@
           "description": "Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation.",
           "readOnly": true,
           "type": "string"
+        },
+        "versionState": {
+          "description": "Optional. Indicates the state of the model version.",
+          "enum": [
+            "VERSION_STATE_UNSPECIFIED",
+            "VERSION_STATE_STABLE",
+            "VERSION_STATE_UNSTABLE"
+          ],
+          "enumDescriptions": [
+            "The version state is unspecified.",
+            "Used to indicate the version is stable.",
+            "Used to indicate the version is unstable."
+          ],
+          "type": "string"
         }
       },
       "type": "object"
@@ -24675,6 +25376,10 @@
           "description": "Optional. Default model display name.",
           "type": "string"
         },
+        "publicArtifactUri": {
+          "description": "Optional. The signed URI for ephemeral Cloud Storage access to model artifact.",
+          "type": "string"
+        },
         "sharedResources": {
           "description": "The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`",
           "type": "string"
@@ -24741,6 +25446,10 @@
       "description": "Reference to a resource.",
       "id": "GoogleCloudAiplatformV1PublisherModelResourceReference",
       "properties": {
+        "description": {
+          "description": "Description of the resource.",
+          "type": "string"
+        },
         "resourceName": {
           "description": "The resource name of the Google Cloud resource.",
           "type": "string"
@@ -24748,6 +25457,10 @@
         "uri": {
           "description": "The URI of the resource.",
           "type": "string"
+        },
+        "useCase": {
+          "description": "Use case (CUJ) of the resource.",
+          "type": "string"
         }
       },
       "type": "object"
@@ -31719,6 +32432,28 @@
       },
       "type": "object"
     },
+    "GoogleTypeDate": {
+      "description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp",
+      "id": "GoogleTypeDate",
+      "properties": {
+        "day": {
+          "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "month": {
+          "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "year": {
+          "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.",
+          "format": "int32",
+          "type": "integer"
+        }
+      },
+      "type": "object"
+    },
     "GoogleTypeExpr": {
       "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.",
       "id": "GoogleTypeExpr",
@@ -31779,6 +32514,70 @@
         }
       },
       "type": "object"
+    },
+    "IntelligenceCloudAutomlXpsMetricEntry": {
+      "id": "IntelligenceCloudAutomlXpsMetricEntry",
+      "properties": {
+        "argentumMetricId": {
+          "description": "For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the \"cloudbilling.googleapis.com/argentum_metric_id\" label. Otherwise leave empty.",
+          "type": "string"
+        },
+        "doubleValue": {
+          "description": "A double value.",
+          "format": "double",
+          "type": "number"
+        },
+        "int64Value": {
+          "description": "A signed 64-bit integer value.",
+          "format": "int64",
+          "type": "string"
+        },
+        "metricName": {
+          "description": "The metric name defined in the service configuration.",
+          "type": "string"
+        },
+        "systemLabels": {
+          "description": "Billing system labels for this (metric, value) pair.",
+          "items": {
+            "$ref": "IntelligenceCloudAutomlXpsMetricEntryLabel"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "IntelligenceCloudAutomlXpsMetricEntryLabel": {
+      "id": "IntelligenceCloudAutomlXpsMetricEntryLabel",
+      "properties": {
+        "labelName": {
+          "description": "The name of the label.",
+          "type": "string"
+        },
+        "labelValue": {
+          "description": "The value of the label.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "IntelligenceCloudAutomlXpsReportingMetrics": {
+      "id": "IntelligenceCloudAutomlXpsReportingMetrics",
+      "properties": {
+        "effectiveTrainingDuration": {
+          "deprecated": true,
+          "description": "The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "metricEntries": {
+          "description": "One entry per metric name. The values must be aggregated per metric name.",
+          "items": {
+            "$ref": "IntelligenceCloudAutomlXpsMetricEntry"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
     }
   },
   "servicePath": "",
diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json
index 83c36bdb6a2..5ab4ab0d2f6 100644
--- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json
@@ -12818,6 +12818,37 @@
                         "https://www.googleapis.com/auth/cloud-platform"
                       ]
                     },
+                    "getIamPolicy": {
+                      "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
+                      "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/publishers/{publishersId}/models/{modelsId}:getIamPolicy",
+                      "httpMethod": "POST",
+                      "id": "aiplatform.projects.locations.publishers.models.getIamPolicy",
+                      "parameterOrder": [
+                        "resource"
+                      ],
+                      "parameters": {
+                        "options.requestedPolicyVersion": {
+                          "description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).",
+                          "format": "int32",
+                          "location": "query",
+                          "type": "integer"
+                        },
+                        "resource": {
+                          "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.",
+                          "location": "path",
+                          "pattern": "^projects/[^/]+/locations/[^/]+/publishers/[^/]+/models/[^/]+$",
+                          "required": true,
+                          "type": "string"
+                        }
+                      },
+                      "path": "v1beta1/{+resource}:getIamPolicy",
+                      "response": {
+                        "$ref": "GoogleIamV1Policy"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/cloud-platform"
+                      ]
+                    },
                     "predict": {
                       "description": "Perform an online prediction.",
                       "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/publishers/{publishersId}/models/{modelsId}:predict",
@@ -16375,135 +16406,859 @@
             }
           }
         }
-      }
+      }
+    },
+    "publishers": {
+      "resources": {
+        "models": {
+          "methods": {
+            "get": {
+              "description": "Gets a Model Garden publisher model.",
+              "flatPath": "v1beta1/publishers/{publishersId}/models/{modelsId}",
+              "httpMethod": "GET",
+              "id": "aiplatform.publishers.models.get",
+              "parameterOrder": [
+                "name"
+              ],
+              "parameters": {
+                "languageCode": {
+                  "description": "Optional. The IETF BCP-47 language code representing the language in which the publisher model's text information should be written in (see go/bcp47).",
+                  "location": "query",
+                  "type": "string"
+                },
+                "name": {
+                  "description": "Required. The name of the PublisherModel resource. Format: `publishers/{publisher}/models/{publisher_model}`",
+                  "location": "path",
+                  "pattern": "^publishers/[^/]+/models/[^/]+$",
+                  "required": true,
+                  "type": "string"
+                },
+                "view": {
+                  "description": "Optional. PublisherModel view specifying which fields to read.",
+                  "enum": [
+                    "PUBLISHER_MODEL_VIEW_UNSPECIFIED",
+                    "PUBLISHER_MODEL_VIEW_BASIC",
+                    "PUBLISHER_MODEL_VIEW_FULL",
+                    "PUBLISHER_MODEL_VERSION_VIEW_BASIC"
+                  ],
+                  "enumDescriptions": [
+                    "The default / unset value. The API will default to the BASIC view.",
+                    "Include basic metadata about the publisher model, but not the full contents.",
+                    "Include everything.",
+                    "Include: VersionId, ModelVersionExternalName, and SupportedActions."
+                  ],
+                  "location": "query",
+                  "type": "string"
+                }
+              },
+              "path": "v1beta1/{+name}",
+              "response": {
+                "$ref": "GoogleCloudAiplatformV1beta1PublisherModel"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            },
+            "list": {
+              "description": "Lists publisher models in Model Garden.",
+              "flatPath": "v1beta1/publishers/{publishersId}/models",
+              "httpMethod": "GET",
+              "id": "aiplatform.publishers.models.list",
+              "parameterOrder": [
+                "parent"
+              ],
+              "parameters": {
+                "filter": {
+                  "description": "Optional. The standard list filter.",
+                  "location": "query",
+                  "type": "string"
+                },
+                "languageCode": {
+                  "description": "Optional. The IETF BCP-47 language code representing the language in which the publisher models' text information should be written in (see go/bcp47). If not set, by default English (en).",
+                  "location": "query",
+                  "type": "string"
+                },
+                "orderBy": {
+                  "description": "Optional. A comma-separated list of fields to order by, sorted in ascending order. Use \"desc\" after a field name for descending.",
+                  "location": "query",
+                  "type": "string"
+                },
+                "pageSize": {
+                  "description": "Optional. The standard list page size.",
+                  "format": "int32",
+                  "location": "query",
+                  "type": "integer"
+                },
+                "pageToken": {
+                  "description": "Optional. The standard list page token. Typically obtained via ListPublisherModelsResponse.next_page_token of the previous ModelGardenService.ListPublisherModels call.",
+                  "location": "query",
+                  "type": "string"
+                },
+                "parent": {
+                  "description": "Required. The name of the Publisher from which to list the PublisherModels. Format: `publishers/{publisher}`",
+                  "location": "path",
+                  "pattern": "^publishers/[^/]+$",
+                  "required": true,
+                  "type": "string"
+                },
+                "view": {
+                  "description": "Optional. PublisherModel view specifying which fields to read.",
+                  "enum": [
+                    "PUBLISHER_MODEL_VIEW_UNSPECIFIED",
+                    "PUBLISHER_MODEL_VIEW_BASIC",
+                    "PUBLISHER_MODEL_VIEW_FULL",
+                    "PUBLISHER_MODEL_VERSION_VIEW_BASIC"
+                  ],
+                  "enumDescriptions": [
+                    "The default / unset value. The API will default to the BASIC view.",
+                    "Include basic metadata about the publisher model, but not the full contents.",
+                    "Include everything.",
+                    "Include: VersionId, ModelVersionExternalName, and SupportedActions."
+                  ],
+                  "location": "query",
+                  "type": "string"
+                }
+              },
+              "path": "v1beta1/{+parent}/models",
+              "response": {
+                "$ref": "GoogleCloudAiplatformV1beta1ListPublisherModelsResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          }
+        }
+      }
+    }
+  },
+  "revision": "20231211",
+  "rootUrl": "https://aiplatform.googleapis.com/",
+  "schemas": {
+    "CloudAiLargeModelsVisionEmbedVideoResponse": {
+      "description": "Video embedding response.",
+      "id": "CloudAiLargeModelsVisionEmbedVideoResponse",
+      "properties": {
+        "videoEmbeddings": {
+          "description": "The embedding vector for the video.",
+          "items": {
+            "type": "any"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionFilteredText": {
+      "description": "Details for filtered input text.",
+      "id": "CloudAiLargeModelsVisionFilteredText",
+      "properties": {
+        "category": {
+          "description": "The filtered RAI category.",
+          "enum": [
+            "RAI_CATEGORY_UNSPECIFIED",
+            "OBSCENE",
+            "SEXUALLY_EXPLICIT",
+            "IDENTITY_ATTACK",
+            "VIOLENCE_ABUSE",
+            "CSAI",
+            "SPII",
+            "CELEBRITY",
+            "FACE_IMG",
+            "WATERMARK_IMG",
+            "MEMORIZATION_IMG",
+            "CSAI_IMG",
+            "PORN_IMG",
+            "VIOLENCE_IMG",
+            "CHILD_IMG",
+            "TOXIC",
+            "SENSITIVE_WORD",
+            "PERSON_IMG",
+            "ICA_IMG",
+            "SEXUAL_IMG",
+            "IU_IMG",
+            "RACY_IMG",
+            "PEDO_IMG",
+            "DEATH_HARM_TRAGEDY",
+            "HEALTH",
+            "FIREARMS_WEAPONS",
+            "RELIGIOUS_BELIEF",
+            "ILLICIT_DRUGS",
+            "WAR_CONFLICT",
+            "POLITICS",
+            "HATE_SYMBOL_IMG",
+            "CHILD_TEXT",
+            "DANGEROUS_CONTENT",
+            "RECITATION_TEXT"
+          ],
+          "enumDescriptions": [
+            "",
+            "",
+            "Porn",
+            "Hate",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "SafetyAttributes returned but not filtered on",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "End of list",
+            "",
+            "Text category from SafetyCat v3",
+            ""
+          ],
+          "type": "string"
+        },
+        "confidence": {
+          "description": "The confidence level of the filtering.",
+          "enum": [
+            "CONFIDENCE_UNSPECIFIED",
+            "CONFIDENCE_LOW",
+            "CONFIDENCE_MEDIUM",
+            "CONFIDENCE_HIGH"
+          ],
+          "enumDescriptions": [
+            "",
+            "",
+            "",
+            ""
+          ],
+          "type": "string"
+        },
+        "prompt": {
+          "description": "Input prompt",
+          "type": "string"
+        },
+        "score": {
+          "description": "Score for category",
+          "format": "double",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionGenerateVideoResponse": {
+      "description": "Generate video response.",
+      "id": "CloudAiLargeModelsVisionGenerateVideoResponse",
+      "properties": {
+        "generatedSamples": {
+          "description": "The generated samples.",
+          "items": {
+            "$ref": "CloudAiLargeModelsVisionMedia"
+          },
+          "type": "array"
+        },
+        "raiMediaFilteredCount": {
+          "description": "Returns if any videos were filtered due to RAI policies.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "raiMediaFilteredReasons": {
+          "description": "Returns rai failure reasons if any.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "raiTextFilteredReason": {
+          "$ref": "CloudAiLargeModelsVisionFilteredText",
+          "description": "Returns filtered text rai info."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionImage": {
+      "description": "Image.",
+      "id": "CloudAiLargeModelsVisionImage",
+      "properties": {
+        "encoding": {
+          "description": "Image encoding, encoded as \"image/png\" or \"image/jpg\".",
+          "type": "string"
+        },
+        "image": {
+          "description": "Raw bytes.",
+          "format": "byte",
+          "type": "string"
+        },
+        "imageRaiScores": {
+          "$ref": "CloudAiLargeModelsVisionImageRAIScores",
+          "description": "RAI scores for generated image."
+        },
+        "raiInfo": {
+          "$ref": "CloudAiLargeModelsVisionRaiInfo",
+          "description": "RAI info for image"
+        },
+        "semanticFilterResponse": {
+          "$ref": "CloudAiLargeModelsVisionSemanticFilterResponse",
+          "description": "Semantic filter info for image."
+        },
+        "uri": {
+          "description": "Path to another storage (typically Google Cloud Storage).",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionImageRAIScores": {
+      "description": "RAI scores for generated image returned.",
+      "id": "CloudAiLargeModelsVisionImageRAIScores",
+      "properties": {
+        "agileWatermarkDetectionScore": {
+          "description": "Agile watermark score for image.",
+          "format": "double",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionMedia": {
+      "description": "Media.",
+      "id": "CloudAiLargeModelsVisionMedia",
+      "properties": {
+        "image": {
+          "$ref": "CloudAiLargeModelsVisionImage",
+          "description": "Image."
+        },
+        "video": {
+          "$ref": "CloudAiLargeModelsVisionVideo",
+          "description": "Video"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionMediaGenerateContentResponse": {
+      "description": "Generate media content response",
+      "id": "CloudAiLargeModelsVisionMediaGenerateContentResponse",
+      "properties": {
+        "response": {
+          "$ref": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse",
+          "description": "Response to the user's request."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionNamedBoundingBox": {
+      "id": "CloudAiLargeModelsVisionNamedBoundingBox",
+      "properties": {
+        "classes": {
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "entities": {
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "scores": {
+          "items": {
+            "format": "float",
+            "type": "number"
+          },
+          "type": "array"
+        },
+        "x1": {
+          "format": "float",
+          "type": "number"
+        },
+        "x2": {
+          "format": "float",
+          "type": "number"
+        },
+        "y1": {
+          "format": "float",
+          "type": "number"
+        },
+        "y2": {
+          "format": "float",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionRaiInfo": {
+      "id": "CloudAiLargeModelsVisionRaiInfo",
+      "properties": {
+        "raiCategories": {
+          "description": "List of rai categories' information to return",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "scores": {
+          "description": "List of rai scores mapping to the rai categories. Rounded to 1 decimal place.",
+          "items": {
+            "format": "float",
+            "type": "number"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionReasonVideoResponse": {
+      "description": "Video reasoning response.",
+      "id": "CloudAiLargeModelsVisionReasonVideoResponse",
+      "properties": {
+        "responses": {
+          "description": "Generated text responses. The generated responses for different segments within the same video.",
+          "items": {
+            "$ref": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionReasonVideoResponseTextResponse": {
+      "description": "Contains text that is the response of the video captioning.",
+      "id": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse",
+      "properties": {
+        "relativeTemporalPartition": {
+          "$ref": "CloudAiLargeModelsVisionRelativeTemporalPartition",
+          "description": "Partition of the caption's video in time. This field is intended for video captioning. To represent the start time and end time of the caption's video."
+        },
+        "text": {
+          "description": "Text information",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionRelativeTemporalPartition": {
+      "description": "For ease of use, assume that the start_offset is inclusive and the end_offset is exclusive. In mathematical terms, the partition would be written as [start_offset, end_offset).",
+      "id": "CloudAiLargeModelsVisionRelativeTemporalPartition",
+      "properties": {
+        "endOffset": {
+          "description": "End time offset of the partition.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "startOffset": {
+          "description": "Start time offset of the partition.",
+          "format": "google-duration",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionSemanticFilterResponse": {
+      "id": "CloudAiLargeModelsVisionSemanticFilterResponse",
+      "properties": {
+        "namedBoundingBoxes": {
+          "description": "Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates.",
+          "items": {
+            "$ref": "CloudAiLargeModelsVisionNamedBoundingBox"
+          },
+          "type": "array"
+        },
+        "passedSemanticFilter": {
+          "description": "This response is added when semantic filter config is turned on in EditConfig. It reports if this image is passed semantic filter response. If passed_semantic_filter is false, the bounding box information will be populated for user to check what caused the semantic filter to fail.",
+          "type": "boolean"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiLargeModelsVisionVideo": {
+      "description": "Video",
+      "id": "CloudAiLargeModelsVisionVideo",
+      "properties": {
+        "uri": {
+          "description": "Path to another storage (typically Google Cloud Storage).",
+          "type": "string"
+        },
+        "video": {
+          "description": "Raw bytes.",
+          "format": "byte",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceCandidate": {
+      "id": "CloudAiNlLlmProtoServiceCandidate",
+      "properties": {
+        "citationMetadata": {
+          "$ref": "CloudAiNlLlmProtoServiceCitationMetadata",
+          "description": "Source attribution of the generated content."
+        },
+        "content": {
+          "$ref": "CloudAiNlLlmProtoServiceContent",
+          "description": "Content of the candidate."
+        },
+        "finishMessage": {
+          "description": "A string that describes the filtering behavior in more detail. Only filled when reason is set.",
+          "type": "string"
+        },
+        "finishReason": {
+          "description": "The reason why the model stopped generating tokens.",
+          "enum": [
+            "FINISH_REASON_UNSPECIFIED",
+            "FINISH_REASON_STOP",
+            "FINISH_REASON_MAX_TOKENS",
+            "FINISH_REASON_SAFETY",
+            "FINISH_REASON_RECITATION",
+            "FINISH_REASON_OTHER"
+          ],
+          "enumDescriptions": [
+            "The finish reason is unspecified.",
+            "Natural stop point of the model or provided stop sequence.",
+            "The maximum number of tokens as specified in the request was reached.",
+            "The token generation was stopped as the response was flagged for safety reasons. NOTE: When streaming the Candidate.content will be empty if content filters blocked the output.",
+            "The token generation was stopped as the response was flagged for unauthorized citations.",
+            "All other reasons that stopped the token generation."
+          ],
+          "type": "string"
+        },
+        "index": {
+          "description": "Index of the candidate.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "safetyRatings": {
+          "description": "Safety ratings of the generated content.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceSafetyRating"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceCitation": {
+      "description": "Source attributions for content.",
+      "id": "CloudAiNlLlmProtoServiceCitation",
+      "properties": {
+        "endIndex": {
+          "description": "End index into the content.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "license": {
+          "description": "License of the attribution.",
+          "type": "string"
+        },
+        "publicationDate": {
+          "$ref": "GoogleTypeDate",
+          "description": "Publication date of the attribution."
+        },
+        "startIndex": {
+          "description": "Start index into the content.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "title": {
+          "description": "Title of the attribution.",
+          "type": "string"
+        },
+        "uri": {
+          "description": "Url reference of the attribution.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceCitationMetadata": {
+      "description": "A collection of source attributions for a piece of content.",
+      "id": "CloudAiNlLlmProtoServiceCitationMetadata",
+      "properties": {
+        "citations": {
+          "description": "List of citations.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceCitation"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceContent": {
+      "description": "The content of a single message from a participant.",
+      "id": "CloudAiNlLlmProtoServiceContent",
+      "properties": {
+        "parts": {
+          "description": "The parts of the message.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServicePart"
+          },
+          "type": "array"
+        },
+        "role": {
+          "description": "The role of the current conversation participant.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceFunctionCall": {
+      "description": "Function call details.",
+      "id": "CloudAiNlLlmProtoServiceFunctionCall",
+      "properties": {
+        "args": {
+          "additionalProperties": {
+            "description": "Properties of the object.",
+            "type": "any"
+          },
+          "description": "The function parameters and values in JSON format.",
+          "type": "object"
+        },
+        "name": {
+          "description": "Required. The name of the function to call.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceFunctionResponse": {
+      "description": "Function response details.",
+      "id": "CloudAiNlLlmProtoServiceFunctionResponse",
+      "properties": {
+        "name": {
+          "description": "Required. The name of the function to call.",
+          "type": "string"
+        },
+        "response": {
+          "additionalProperties": {
+            "description": "Properties of the object.",
+            "type": "any"
+          },
+          "description": "Required. The function response in JSON object format.",
+          "type": "object"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceGenerateMultiModalResponse": {
+      "id": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse",
+      "properties": {
+        "candidates": {
+          "description": "Possible candidate responses to the conversation up until this point.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceCandidate"
+          },
+          "type": "array"
+        },
+        "promptFeedback": {
+          "$ref": "CloudAiNlLlmProtoServicePromptFeedback",
+          "description": "Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations."
+        },
+        "reportingMetrics": {
+          "$ref": "IntelligenceCloudAutomlXpsReportingMetrics",
+          "description": "Billable prediction metrics."
+        },
+        "usageMetadata": {
+          "$ref": "CloudAiNlLlmProtoServiceUsageMetadata",
+          "description": "Usage metadata about the response(s)."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePart": {
+      "description": "A single part of a message.",
+      "id": "CloudAiNlLlmProtoServicePart",
+      "properties": {
+        "fileData": {
+          "$ref": "CloudAiNlLlmProtoServicePartFileData",
+          "description": "URI-based data."
+        },
+        "functionCall": {
+          "$ref": "CloudAiNlLlmProtoServiceFunctionCall",
+          "description": "Function call data."
+        },
+        "functionResponse": {
+          "$ref": "CloudAiNlLlmProtoServiceFunctionResponse",
+          "description": "Function response data."
+        },
+        "inlineData": {
+          "$ref": "CloudAiNlLlmProtoServicePartBlob",
+          "description": "Inline bytes data"
+        },
+        "text": {
+          "description": "Text input.",
+          "type": "string"
+        },
+        "videoMetadata": {
+          "$ref": "CloudAiNlLlmProtoServicePartVideoMetadata",
+          "description": "Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePartBlob": {
+      "description": "Represents arbitrary blob data input.",
+      "id": "CloudAiNlLlmProtoServicePartBlob",
+      "properties": {
+        "data": {
+          "description": "Inline data.",
+          "format": "byte",
+          "type": "string"
+        },
+        "mimeType": {
+          "description": "The mime type corresponding to this input.",
+          "type": "string"
+        },
+        "originalFileData": {
+          "$ref": "CloudAiNlLlmProtoServicePartFileData",
+          "description": "Original file data where the blob comes from."
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePartFileData": {
+      "description": "Represents file data.",
+      "id": "CloudAiNlLlmProtoServicePartFileData",
+      "properties": {
+        "fileUri": {
+          "description": "URI-based data.",
+          "type": "string"
+        },
+        "mimeType": {
+          "description": "The mime type corresponding to this input.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePartVideoMetadata": {
+      "description": "Metadata describes the input video content.",
+      "id": "CloudAiNlLlmProtoServicePartVideoMetadata",
+      "properties": {
+        "endOffset": {
+          "description": "The end offset of the video.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "startOffset": {
+          "description": "The start offset of the video.",
+          "format": "google-duration",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServicePromptFeedback": {
+      "description": "Content filter results for a prompt sent in the request.",
+      "id": "CloudAiNlLlmProtoServicePromptFeedback",
+      "properties": {
+        "blockReason": {
+          "description": "Blocked reason.",
+          "enum": [
+            "BLOCKED_REASON_UNSPECIFIED",
+            "SAFETY",
+            "OTHER"
+          ],
+          "enumDescriptions": [
+            "Unspecified blocked reason.",
+            "Candidates blocked due to safety.",
+            "Candidates blocked due to other reason."
+          ],
+          "type": "string"
+        },
+        "blockReasonMessage": {
+          "description": "A readable block reason message.",
+          "type": "string"
+        },
+        "safetyRatings": {
+          "description": "Safety ratings.",
+          "items": {
+            "$ref": "CloudAiNlLlmProtoServiceSafetyRating"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "CloudAiNlLlmProtoServiceSafetyRating": {
+      "description": "Safety rating corresponding to the generated content.",
+      "id": "CloudAiNlLlmProtoServiceSafetyRating",
+      "properties": {
+        "blocked": {
+          "description": "Indicates whether the content was filtered out because of this rating.",
+          "type": "boolean"
+        },
+        "category": {
+          "description": "Harm category.",
+          "enum": [
+            "HARM_CATEGORY_UNSPECIFIED",
+            "HARM_CATEGORY_HATE_SPEECH",
+            "HARM_CATEGORY_DANGEROUS_CONTENT",
+            "HARM_CATEGORY_HARASSMENT",
+            "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+          ],
+          "enumDescriptions": [
+            "The harm category is unspecified.",
+            "The harm category is hate speech.",
+            "The harm category is dangerous content.",
+            "The harm category is harassment.",
+            "The harm category is sexually explicit."
+          ],
+          "type": "string"
+        },
+        "probability": {
+          "description": "Harm probability levels in the content.",
+          "enum": [
+            "HARM_PROBABILITY_UNSPECIFIED",
+            "NEGLIGIBLE",
+            "LOW",
+            "MEDIUM",
+            "HIGH"
+          ],
+          "enumDescriptions": [
+            "Harm probability unspecified.",
+            "Negligible level of harm.",
+            "Low level of harm.",
+            "Medium level of harm.",
+            "High level of harm."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
     },
-    "publishers": {
-      "resources": {
-        "models": {
-          "methods": {
-            "get": {
-              "description": "Gets a Model Garden publisher model.",
-              "flatPath": "v1beta1/publishers/{publishersId}/models/{modelsId}",
-              "httpMethod": "GET",
-              "id": "aiplatform.publishers.models.get",
-              "parameterOrder": [
-                "name"
-              ],
-              "parameters": {
-                "languageCode": {
-                  "description": "Optional. The IETF BCP-47 language code representing the language in which the publisher model's text information should be written in (see go/bcp47).",
-                  "location": "query",
-                  "type": "string"
-                },
-                "name": {
-                  "description": "Required. The name of the PublisherModel resource. Format: `publishers/{publisher}/models/{publisher_model}`",
-                  "location": "path",
-                  "pattern": "^publishers/[^/]+/models/[^/]+$",
-                  "required": true,
-                  "type": "string"
-                },
-                "view": {
-                  "description": "Optional. PublisherModel view specifying which fields to read.",
-                  "enum": [
-                    "PUBLISHER_MODEL_VIEW_UNSPECIFIED",
-                    "PUBLISHER_MODEL_VIEW_BASIC",
-                    "PUBLISHER_MODEL_VIEW_FULL",
-                    "PUBLISHER_MODEL_VERSION_VIEW_BASIC"
-                  ],
-                  "enumDescriptions": [
-                    "The default / unset value. The API will default to the BASIC view.",
-                    "Include basic metadata about the publisher model, but not the full contents.",
-                    "Include everything.",
-                    "Include: VersionId, ModelVersionExternalName, and SupportedActions."
-                  ],
-                  "location": "query",
-                  "type": "string"
-                }
-              },
-              "path": "v1beta1/{+name}",
-              "response": {
-                "$ref": "GoogleCloudAiplatformV1beta1PublisherModel"
-              },
-              "scopes": [
-                "https://www.googleapis.com/auth/cloud-platform"
-              ]
-            },
-            "list": {
-              "description": "Lists publisher models in Model Garden.",
-              "flatPath": "v1beta1/publishers/{publishersId}/models",
-              "httpMethod": "GET",
-              "id": "aiplatform.publishers.models.list",
-              "parameterOrder": [
-                "parent"
-              ],
-              "parameters": {
-                "filter": {
-                  "description": "Optional. The standard list filter.",
-                  "location": "query",
-                  "type": "string"
-                },
-                "languageCode": {
-                  "description": "Optional. The IETF BCP-47 language code representing the language in which the publisher models' text information should be written in (see go/bcp47). If not set, by default English (en).",
-                  "location": "query",
-                  "type": "string"
-                },
-                "orderBy": {
-                  "description": "Optional. A comma-separated list of fields to order by, sorted in ascending order. Use \"desc\" after a field name for descending.",
-                  "location": "query",
-                  "type": "string"
-                },
-                "pageSize": {
-                  "description": "Optional. The standard list page size.",
-                  "format": "int32",
-                  "location": "query",
-                  "type": "integer"
-                },
-                "pageToken": {
-                  "description": "Optional. The standard list page token. Typically obtained via ListPublisherModelsResponse.next_page_token of the previous ModelGardenService.ListPublisherModels call.",
-                  "location": "query",
-                  "type": "string"
-                },
-                "parent": {
-                  "description": "Required. The name of the Publisher from which to list the PublisherModels. Format: `publishers/{publisher}`",
-                  "location": "path",
-                  "pattern": "^publishers/[^/]+$",
-                  "required": true,
-                  "type": "string"
-                },
-                "view": {
-                  "description": "Optional. PublisherModel view specifying which fields to read.",
-                  "enum": [
-                    "PUBLISHER_MODEL_VIEW_UNSPECIFIED",
-                    "PUBLISHER_MODEL_VIEW_BASIC",
-                    "PUBLISHER_MODEL_VIEW_FULL",
-                    "PUBLISHER_MODEL_VERSION_VIEW_BASIC"
-                  ],
-                  "enumDescriptions": [
-                    "The default / unset value. The API will default to the BASIC view.",
-                    "Include basic metadata about the publisher model, but not the full contents.",
-                    "Include everything.",
-                    "Include: VersionId, ModelVersionExternalName, and SupportedActions."
-                  ],
-                  "location": "query",
-                  "type": "string"
-                }
-              },
-              "path": "v1beta1/{+parent}/models",
-              "response": {
-                "$ref": "GoogleCloudAiplatformV1beta1ListPublisherModelsResponse"
-              },
-              "scopes": [
-                "https://www.googleapis.com/auth/cloud-platform"
-              ]
-            }
-          }
+    "CloudAiNlLlmProtoServiceUsageMetadata": {
+      "description": "Usage metadata about response(s).",
+      "id": "CloudAiNlLlmProtoServiceUsageMetadata",
+      "properties": {
+        "candidatesTokenCount": {
+          "description": "Number of tokens in the response(s).",
+          "format": "int32",
+          "type": "integer"
+        },
+        "promptTokenCount": {
+          "description": "Number of tokens in the request.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "totalTokenCount": {
+          "format": "int32",
+          "type": "integer"
         }
-      }
-    }
-  },
-  "revision": "20231129",
-  "rootUrl": "https://aiplatform.googleapis.com/",
-  "schemas": {
+      },
+      "type": "object"
+    },
     "GoogleApiHttpBody": {
       "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.",
       "id": "GoogleApiHttpBody",
@@ -17392,14 +18147,14 @@
       "id": "GoogleCloudAiplatformV1beta1BatchPredictionJobInstanceConfig",
       "properties": {
         "excludedFields": {
-          "description": "Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.",
+          "description": "Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.",
           "items": {
             "type": "string"
           },
           "type": "array"
         },
         "includedFields": {
-          "description": "Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.",
+          "description": "Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.",
           "items": {
             "type": "string"
           },
@@ -17582,16 +18337,16 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1beta1Blob": {
-      "description": "Content blob.",
+      "description": "Raw media bytes. Text should not be sent as raw bytes, use the 'text' field.",
       "id": "GoogleCloudAiplatformV1beta1Blob",
       "properties": {
         "data": {
-          "description": "Required. Data.",
+          "description": "Required. Raw bytes for media formats.",
           "format": "byte",
           "type": "string"
         },
         "mimeType": {
-          "description": "Required. Mime type of the data.",
+          "description": "Required. The IANA standard MIME type of the source data.",
           "type": "string"
         }
       },
@@ -17666,7 +18421,7 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1beta1Candidate": {
-      "description": "Generated candidate.",
+      "description": "A response candidate generated from the model.",
       "id": "GoogleCloudAiplatformV1beta1Candidate",
       "properties": {
         "citationMetadata": {
@@ -17680,12 +18435,12 @@
           "readOnly": true
         },
         "finishMessage": {
-          "description": "Output only. A string that describes the filtering behavior in more detail. Only filled when reason is set.",
+          "description": "Output only. Describes the reason the model stopped generating tokens in more detail. This is only filled when `finish_reason` is set.",
           "readOnly": true,
           "type": "string"
         },
         "finishReason": {
-          "description": "Output only. The reason why the model stopped generating tokens.",
+          "description": "Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.",
           "enum": [
             "FINISH_REASON_UNSPECIFIED",
             "STOP",
@@ -17712,7 +18467,7 @@
           "type": "integer"
         },
         "safetyRatings": {
-          "description": "Output only. Safety ratings of the generated content.",
+          "description": "Output only. List of ratings for the safety of a response candidate. There is at most one rating per category.",
           "items": {
             "$ref": "GoogleCloudAiplatformV1beta1SafetyRating"
           },
@@ -17934,18 +18689,18 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1beta1Content": {
-      "description": "A single turn in a conversation with the model.",
+      "description": "The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn.",
       "id": "GoogleCloudAiplatformV1beta1Content",
       "properties": {
         "parts": {
-          "description": "Required. Ordered parts that make up a message. Parts may have different MIME types.",
+          "description": "Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types.",
           "items": {
             "$ref": "GoogleCloudAiplatformV1beta1Part"
           },
           "type": "array"
         },
         "role": {
-          "description": "Optional. The role in a conversation associated with this content. Set it only if a content represents a turn in a conversations, otherwise no need to set role. Possible values: user, model.",
+          "description": "Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset.",
           "type": "string"
         }
       },
@@ -18597,6 +19352,13 @@
           "description": "Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`",
           "type": "string"
         },
+        "models": {
+          "description": "Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the \"default\" version will be returned. The \"default\" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
         "network": {
           "description": "Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.",
           "type": "string"
@@ -21817,7 +22579,7 @@
           "type": "string"
         },
         "mimeType": {
-          "description": "Required. Mime type of the data.",
+          "description": "Required. The IANA standard MIME type of the source data.",
           "type": "string"
         }
       },
@@ -21985,7 +22747,7 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1beta1FunctionDeclaration": {
-      "description": "Function declaration details.",
+      "description": "Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client.",
       "id": "GoogleCloudAiplatformV1beta1FunctionDeclaration",
       "properties": {
         "description": {
@@ -21998,7 +22760,7 @@
         },
         "parameters": {
           "$ref": "GoogleCloudAiplatformV1beta1Schema",
-          "description": "Optional. Describes the parameters to this function. Reflects the Open API 3.03 Parameter Object string Key: the name of the parameter. Parameter names are case sensitive. For function with no parameters, this can be left unset."
+          "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the OpenAPI 3.0.3 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1"
         }
       },
       "type": "object"
@@ -22087,13 +22849,14 @@
       "id": "GoogleCloudAiplatformV1beta1GenerateContentRequest",
       "properties": {
         "contents": {
-          "description": "Required. Input content.",
+          "description": "Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request.",
           "items": {
             "$ref": "GoogleCloudAiplatformV1beta1Content"
           },
           "type": "array"
         },
         "endpoint": {
+          "deprecated": true,
           "description": "Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`",
           "type": "string"
         },
@@ -22109,7 +22872,7 @@
           "type": "array"
         },
         "tools": {
-          "description": "Optional. Tools that the model may use to generate response.",
+          "description": "Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. The only supported tool is currently `Function`",
           "items": {
             "$ref": "GoogleCloudAiplatformV1beta1Tool"
           },
@@ -22207,38 +22970,11 @@
           "format": "int32",
           "type": "integer"
         },
-        "echo": {
-          "description": "Optional. Echo.",
-          "type": "boolean"
-        },
-        "frequencyPenalty": {
-          "description": "Optional. Frequency penalties.",
-          "format": "float",
-          "type": "number"
-        },
-        "logitBias": {
-          "additionalProperties": {
-            "format": "float",
-            "type": "number"
-          },
-          "description": "Optional. Logit bias.",
-          "type": "object"
-        },
-        "logprobs": {
-          "description": "Optional. Logit probabilities.",
-          "format": "int32",
-          "type": "integer"
-        },
         "maxOutputTokens": {
           "description": "Optional. The maximum number of output tokens to generate per message.",
           "format": "int32",
           "type": "integer"
         },
-        "presencePenalty": {
-          "description": "Optional. Positive penalties.",
-          "format": "float",
-          "type": "number"
-        },
         "stopSequences": {
           "description": "Optional. Stop sequences.",
           "items": {
@@ -25465,7 +26201,8 @@
             "CUSTOM",
             "BQML",
             "MODEL_GARDEN",
-            "GENIE"
+            "GENIE",
+            "CUSTOM_TEXT_EMBEDDING"
           ],
           "enumDescriptions": [
             "Should not be used.",
@@ -25473,7 +26210,8 @@
             "The Model is uploaded by user or custom training pipeline.",
             "The Model is registered and sync'ed from BigQuery ML.",
             "The Model is saved or tuned from Model Garden.",
-            "The Model is saved or tuned from Genie."
+            "The Model is saved or tuned from Genie.",
+            "The Model is uploaded by text embedding finetuning pipeline."
           ],
           "type": "string"
         }
@@ -26405,7 +27143,7 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1beta1Part": {
-      "description": "Content part.",
+      "description": "A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.",
       "id": "GoogleCloudAiplatformV1beta1Part",
       "properties": {
         "fileData": {
@@ -27333,6 +28071,20 @@
           "description": "Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation.",
           "readOnly": true,
           "type": "string"
+        },
+        "versionState": {
+          "description": "Optional. Indicates the state of the model version.",
+          "enum": [
+            "VERSION_STATE_UNSPECIFIED",
+            "VERSION_STATE_STABLE",
+            "VERSION_STATE_UNSTABLE"
+          ],
+          "enumDescriptions": [
+            "The version state is unspecified.",
+            "Used to indicate the version is stable.",
+            "Used to indicate the version is unstable."
+          ],
+          "type": "string"
         }
       },
       "type": "object"
@@ -27412,6 +28164,10 @@
           "description": "Optional. Default model display name.",
           "type": "string"
         },
+        "publicArtifactUri": {
+          "description": "Optional. The signed URI for ephemeral Cloud Storage access to model artifact.",
+          "type": "string"
+        },
         "sharedResources": {
           "description": "The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`",
           "type": "string"
@@ -27493,6 +28249,10 @@
       "description": "Reference to a resource.",
       "id": "GoogleCloudAiplatformV1beta1PublisherModelResourceReference",
       "properties": {
+        "description": {
+          "description": "Description of the resource.",
+          "type": "string"
+        },
         "resourceName": {
           "description": "The resource name of the Google Cloud resource.",
           "type": "string"
@@ -27500,6 +28260,10 @@
         "uri": {
           "description": "The URI of the resource.",
           "type": "string"
+        },
+        "useCase": {
+          "description": "Use case (CUJ) of the resource.",
+          "type": "string"
         }
       },
       "type": "object"
@@ -28579,7 +29343,7 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1beta1Schema": {
-      "description": "Represents a select subset of an OpenAPI 3.0 Schema object. Schema is used to define the format of input/output data. More fields may be added in the future as needed. https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject",
+      "description": "Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed.",
       "id": "GoogleCloudAiplatformV1beta1Schema",
       "properties": {
         "description": {
@@ -33808,7 +34572,7 @@
       "type": "object"
     },
     "GoogleCloudAiplatformV1beta1Tool": {
-      "description": "Tool details that the model may use to generate response.",
+      "description": "Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model.",
       "id": "GoogleCloudAiplatformV1beta1Tool",
       "properties": {
         "functionDeclarations": {
@@ -34966,6 +35730,70 @@
         }
       },
       "type": "object"
+    },
+    "IntelligenceCloudAutomlXpsMetricEntry": {
+      "id": "IntelligenceCloudAutomlXpsMetricEntry",
+      "properties": {
+        "argentumMetricId": {
+          "description": "For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the \"cloudbilling.googleapis.com/argentum_metric_id\" label. Otherwise leave empty.",
+          "type": "string"
+        },
+        "doubleValue": {
+          "description": "A double value.",
+          "format": "double",
+          "type": "number"
+        },
+        "int64Value": {
+          "description": "A signed 64-bit integer value.",
+          "format": "int64",
+          "type": "string"
+        },
+        "metricName": {
+          "description": "The metric name defined in the service configuration.",
+          "type": "string"
+        },
+        "systemLabels": {
+          "description": "Billing system labels for this (metric, value) pair.",
+          "items": {
+            "$ref": "IntelligenceCloudAutomlXpsMetricEntryLabel"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "IntelligenceCloudAutomlXpsMetricEntryLabel": {
+      "id": "IntelligenceCloudAutomlXpsMetricEntryLabel",
+      "properties": {
+        "labelName": {
+          "description": "The name of the label.",
+          "type": "string"
+        },
+        "labelValue": {
+          "description": "The value of the label.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "IntelligenceCloudAutomlXpsReportingMetrics": {
+      "id": "IntelligenceCloudAutomlXpsReportingMetrics",
+      "properties": {
+        "effectiveTrainingDuration": {
+          "deprecated": true,
+          "description": "The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "metricEntries": {
+          "description": "One entry per metric name. The values must be aggregated per metric name.",
+          "items": {
+            "$ref": "IntelligenceCloudAutomlXpsMetricEntry"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
     }
   },
   "servicePath": "",
diff --git a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json
index 05cd00f8b12..67e9e218587 100644
--- a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json
@@ -423,7 +423,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231211",
   "rootUrl": "https://alertcenter.googleapis.com/",
   "schemas": {
     "AbuseDetected": {
diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1.json b/googleapiclient/discovery_cache/documents/alloydb.v1.json
index 1d292af5290..6f5c2bfbf5f 100644
--- a/googleapiclient/discovery_cache/documents/alloydb.v1.json
+++ b/googleapiclient/discovery_cache/documents/alloydb.v1.json
@@ -1489,7 +1489,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231128",
   "rootUrl": "https://alloydb.googleapis.com/",
   "schemas": {
     "AutomatedBackupPolicy": {
@@ -1639,6 +1639,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzs": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "sizeBytes": {
           "description": "Output only. The size of the backup in bytes.",
           "format": "int64",
@@ -1896,6 +1901,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzs": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "secondaryConfig": {
           "$ref": "SecondaryConfig",
           "description": "Cross Region replication config specific to SECONDARY cluster."
@@ -2386,6 +2396,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzs": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "state": {
           "description": "Output only. The current serving state of the instance.",
           "enum": [
@@ -3175,7 +3190,7 @@
           "type": "string"
         },
         "resourceContainer": {
-          "description": "Closest parent container of this resource. In GCP, 'container' refers to a Cloud Resource Manager project. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"gcp/projects/123\". For GCP provided resources, number should be project number.",
+          "description": "Closest parent container of this resource. In GCP, 'container' refers to a Cloud Resource Manager project. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"projects/123\". For GCP provided resources, number should be project number.",
           "type": "string"
         },
         "resourceName": {
@@ -3502,13 +3517,35 @@
           "description": "The type of the instance. Specified at creation time.",
           "enum": [
             "INSTANCE_TYPE_UNSPECIFIED",
+            "SUB_RESOURCE_TYPE_UNSPECIFIED",
             "PRIMARY",
             "SECONDARY",
             "READ_REPLICA",
-            "OTHER"
+            "OTHER",
+            "SUB_RESOURCE_TYPE_PRIMARY",
+            "SUB_RESOURCE_TYPE_SECONDARY",
+            "SUB_RESOURCE_TYPE_READ_REPLICA",
+            "SUB_RESOURCE_TYPE_OTHER"
+          ],
+          "enumDeprecated": [
+            true,
+            false,
+            true,
+            true,
+            true,
+            true,
+            false,
+            false,
+            false,
+            false
           ],
           "enumDescriptions": [
             "",
+            "For rest of the other categories.",
+            "A regular primary database instance.",
+            "A cluster or an instance acting as a secondary.",
+            "An instance acting as a read-replica.",
+            "For rest of the other categories.",
             "A regular primary database instance.",
             "A cluster or an instance acting as a secondary.",
             "An instance acting as a read-replica.",
@@ -3529,7 +3566,7 @@
           "description": "The product this resource represents."
         },
         "resourceContainer": {
-          "description": "Closest parent Cloud Resource Manager container of this resource. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"gcp/projects/123\". For GCP provided resources, number should be project number.",
+          "description": "Closest parent Cloud Resource Manager container of this resource. It must be resource name of a Cloud Resource Manager project with the format of \"/\", such as \"projects/123\". For GCP provided resources, number should be project number.",
           "type": "string"
         },
         "resourceName": {
@@ -3604,20 +3641,44 @@
           "description": "The specific engine that the underlying database is running.",
           "enum": [
             "ENGINE_UNSPECIFIED",
+            "ENGINE_MYSQL",
             "MYSQL",
+            "ENGINE_POSTGRES",
             "POSTGRES",
+            "ENGINE_SQL_SERVER",
             "SQL_SERVER",
+            "ENGINE_NATIVE",
             "NATIVE",
+            "ENGINE_CLOUD_SPANNER_WITH_POSTGRES_DIALECT",
             "SPANGRES",
             "ENGINE_OTHER"
           ],
+          "enumDeprecated": [
+            false,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false
+          ],
           "enumDescriptions": [
             "UNSPECIFIED means engine type is not known or available.",
+            "MySQL binary running as an engine in the database instance.",
             "MySQL binary running as engine in database instance.",
             "Postgres binary running as engine in database instance.",
+            "Postgres binary running as engine in database instance.",
             "SQLServer binary running as engine in database instance.",
+            "SQLServer binary running as engine in database instance.",
+            "Native database binary running as engine in instance.",
             "Native database binary running as engine in instance.",
             "Cloud Spanner with Postgres dialect.",
+            "Cloud Spanner with Postgres dialect.",
             "Other refers to rest of other database engine. This is to be when engine is known, but it is not present in this enum."
           ],
           "type": "string"
@@ -3626,18 +3687,38 @@
           "description": "Type of specific database product. It could be CloudSQL, AlloyDB etc..",
           "enum": [
             "PRODUCT_TYPE_UNSPECIFIED",
+            "PRODUCT_TYPE_CLOUD_SQL",
             "CLOUD_SQL",
+            "PRODUCT_TYPE_ALLOYDB",
             "ALLOYDB",
+            "PRODUCT_TYPE_SPANNER",
             "SPANNER",
+            "PRODUCT_TYPE_ON_PREM",
             "ON_PREM",
             "PRODUCT_TYPE_OTHER"
           ],
+          "enumDeprecated": [
+            false,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false
+          ],
           "enumDescriptions": [
             "UNSPECIFIED means product type is not known or available.",
             "Cloud SQL product area in GCP",
+            "Cloud SQL product area in GCP",
             "AlloyDB product area in GCP",
+            "AlloyDB product area in GCP",
+            "Spanner product area in GCP",
             "Spanner product area in GCP",
             "On premises database product.",
+            "On premises database product.",
             "Other refers to rest of other product type. This is to be when product type is known, but it is not present in this enum."
           ],
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json b/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json
index dc0e4033a0c..1b53640825d 100644
--- a/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json
@@ -1489,7 +1489,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231128",
   "rootUrl": "https://alloydb.googleapis.com/",
   "schemas": {
     "AutomatedBackupPolicy": {
@@ -1639,6 +1639,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzi": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "satisfiesPzs": {
           "description": "Output only. Reserved for future use.",
           "readOnly": true,
@@ -1905,6 +1910,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzi": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "satisfiesPzs": {
           "description": "Output only. Reserved for future use.",
           "readOnly": true,
@@ -2420,6 +2430,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzi": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "satisfiesPzs": {
           "description": "Output only. Reserved for future use.",
           "readOnly": true,
@@ -3229,7 +3244,7 @@
           "type": "string"
         },
         "resourceContainer": {
-          "description": "Closest parent container of this resource. In GCP, 'container' refers to a Cloud Resource Manager project. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"gcp/projects/123\". For GCP provided resources, number should be project number.",
+          "description": "Closest parent container of this resource. In GCP, 'container' refers to a Cloud Resource Manager project. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"projects/123\". For GCP provided resources, number should be project number.",
           "type": "string"
         },
         "resourceName": {
@@ -3556,13 +3571,35 @@
           "description": "The type of the instance. Specified at creation time.",
           "enum": [
             "INSTANCE_TYPE_UNSPECIFIED",
+            "SUB_RESOURCE_TYPE_UNSPECIFIED",
             "PRIMARY",
             "SECONDARY",
             "READ_REPLICA",
-            "OTHER"
+            "OTHER",
+            "SUB_RESOURCE_TYPE_PRIMARY",
+            "SUB_RESOURCE_TYPE_SECONDARY",
+            "SUB_RESOURCE_TYPE_READ_REPLICA",
+            "SUB_RESOURCE_TYPE_OTHER"
+          ],
+          "enumDeprecated": [
+            true,
+            false,
+            true,
+            true,
+            true,
+            true,
+            false,
+            false,
+            false,
+            false
           ],
           "enumDescriptions": [
             "",
+            "For rest of the other categories.",
+            "A regular primary database instance.",
+            "A cluster or an instance acting as a secondary.",
+            "An instance acting as a read-replica.",
+            "For rest of the other categories.",
             "A regular primary database instance.",
             "A cluster or an instance acting as a secondary.",
             "An instance acting as a read-replica.",
@@ -3583,7 +3620,7 @@
           "description": "The product this resource represents."
         },
         "resourceContainer": {
-          "description": "Closest parent Cloud Resource Manager container of this resource. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"gcp/projects/123\". For GCP provided resources, number should be project number.",
+          "description": "Closest parent Cloud Resource Manager container of this resource. It must be resource name of a Cloud Resource Manager project with the format of \"/\", such as \"projects/123\". For GCP provided resources, number should be project number.",
           "type": "string"
         },
         "resourceName": {
@@ -3658,19 +3695,43 @@
           "description": "The specific engine that the underlying database is running.",
           "enum": [
             "ENGINE_UNSPECIFIED",
+            "ENGINE_MYSQL",
             "MYSQL",
+            "ENGINE_POSTGRES",
             "POSTGRES",
+            "ENGINE_SQL_SERVER",
             "SQL_SERVER",
+            "ENGINE_NATIVE",
             "NATIVE",
+            "ENGINE_CLOUD_SPANNER_WITH_POSTGRES_DIALECT",
             "SPANGRES",
             "ENGINE_OTHER"
           ],
+          "enumDeprecated": [
+            false,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false
+          ],
           "enumDescriptions": [
             "UNSPECIFIED means engine type is not known or available.",
+            "MySQL binary running as an engine in the database instance.",
             "MySQL binary running as engine in database instance.",
             "Postgres binary running as engine in database instance.",
+            "Postgres binary running as engine in database instance.",
+            "SQLServer binary running as engine in database instance.",
             "SQLServer binary running as engine in database instance.",
             "Native database binary running as engine in instance.",
+            "Native database binary running as engine in instance.",
+            "Cloud Spanner with Postgres dialect.",
             "Cloud Spanner with Postgres dialect.",
             "Other refers to rest of other database engine. This is to be when engine is known, but it is not present in this enum."
           ],
@@ -3680,18 +3741,38 @@
           "description": "Type of specific database product. It could be CloudSQL, AlloyDB etc..",
           "enum": [
             "PRODUCT_TYPE_UNSPECIFIED",
+            "PRODUCT_TYPE_CLOUD_SQL",
             "CLOUD_SQL",
+            "PRODUCT_TYPE_ALLOYDB",
             "ALLOYDB",
+            "PRODUCT_TYPE_SPANNER",
             "SPANNER",
+            "PRODUCT_TYPE_ON_PREM",
             "ON_PREM",
             "PRODUCT_TYPE_OTHER"
           ],
+          "enumDeprecated": [
+            false,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false
+          ],
           "enumDescriptions": [
             "UNSPECIFIED means product type is not known or available.",
             "Cloud SQL product area in GCP",
+            "Cloud SQL product area in GCP",
             "AlloyDB product area in GCP",
+            "AlloyDB product area in GCP",
+            "Spanner product area in GCP",
             "Spanner product area in GCP",
             "On premises database product.",
+            "On premises database product.",
             "Other refers to rest of other product type. This is to be when product type is known, but it is not present in this enum."
           ],
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1beta.json b/googleapiclient/discovery_cache/documents/alloydb.v1beta.json
index 5c583f623b1..c82ce3412ea 100644
--- a/googleapiclient/discovery_cache/documents/alloydb.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/alloydb.v1beta.json
@@ -1486,7 +1486,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231128",
   "rootUrl": "https://alloydb.googleapis.com/",
   "schemas": {
     "AutomatedBackupPolicy": {
@@ -1636,6 +1636,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzs": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "sizeBytes": {
           "description": "Output only. The size of the backup in bytes.",
           "format": "int64",
@@ -1887,6 +1892,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzs": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "secondaryConfig": {
           "$ref": "SecondaryConfig",
           "description": "Cross Region replication config specific to SECONDARY cluster."
@@ -2397,6 +2407,11 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "satisfiesPzs": {
+          "description": "Output only. Reserved for future use.",
+          "readOnly": true,
+          "type": "boolean"
+        },
         "state": {
           "description": "Output only. The current serving state of the instance.",
           "enum": [
@@ -3190,7 +3205,7 @@
           "type": "string"
         },
         "resourceContainer": {
-          "description": "Closest parent container of this resource. In GCP, 'container' refers to a Cloud Resource Manager project. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"gcp/projects/123\". For GCP provided resources, number should be project number.",
+          "description": "Closest parent container of this resource. In GCP, 'container' refers to a Cloud Resource Manager project. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"projects/123\". For GCP provided resources, number should be project number.",
           "type": "string"
         },
         "resourceName": {
@@ -3517,13 +3532,35 @@
           "description": "The type of the instance. Specified at creation time.",
           "enum": [
             "INSTANCE_TYPE_UNSPECIFIED",
+            "SUB_RESOURCE_TYPE_UNSPECIFIED",
             "PRIMARY",
             "SECONDARY",
             "READ_REPLICA",
-            "OTHER"
+            "OTHER",
+            "SUB_RESOURCE_TYPE_PRIMARY",
+            "SUB_RESOURCE_TYPE_SECONDARY",
+            "SUB_RESOURCE_TYPE_READ_REPLICA",
+            "SUB_RESOURCE_TYPE_OTHER"
+          ],
+          "enumDeprecated": [
+            true,
+            false,
+            true,
+            true,
+            true,
+            true,
+            false,
+            false,
+            false,
+            false
           ],
           "enumDescriptions": [
             "",
+            "For rest of the other categories.",
+            "A regular primary database instance.",
+            "A cluster or an instance acting as a secondary.",
+            "An instance acting as a read-replica.",
+            "For rest of the other categories.",
             "A regular primary database instance.",
             "A cluster or an instance acting as a secondary.",
             "An instance acting as a read-replica.",
@@ -3544,7 +3581,7 @@
           "description": "The product this resource represents."
         },
         "resourceContainer": {
-          "description": "Closest parent Cloud Resource Manager container of this resource. It must be resource name of a Cloud Resource Manager project with the format of \"provider//\", such as \"gcp/projects/123\". For GCP provided resources, number should be project number.",
+          "description": "Closest parent Cloud Resource Manager container of this resource. It must be resource name of a Cloud Resource Manager project with the format of \"/\", such as \"projects/123\". For GCP provided resources, number should be project number.",
           "type": "string"
         },
         "resourceName": {
@@ -3619,20 +3656,44 @@
           "description": "The specific engine that the underlying database is running.",
           "enum": [
             "ENGINE_UNSPECIFIED",
+            "ENGINE_MYSQL",
             "MYSQL",
+            "ENGINE_POSTGRES",
             "POSTGRES",
+            "ENGINE_SQL_SERVER",
             "SQL_SERVER",
+            "ENGINE_NATIVE",
             "NATIVE",
+            "ENGINE_CLOUD_SPANNER_WITH_POSTGRES_DIALECT",
             "SPANGRES",
             "ENGINE_OTHER"
           ],
+          "enumDeprecated": [
+            false,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false
+          ],
           "enumDescriptions": [
             "UNSPECIFIED means engine type is not known or available.",
+            "MySQL binary running as an engine in the database instance.",
             "MySQL binary running as engine in database instance.",
             "Postgres binary running as engine in database instance.",
+            "Postgres binary running as engine in database instance.",
             "SQLServer binary running as engine in database instance.",
+            "SQLServer binary running as engine in database instance.",
+            "Native database binary running as engine in instance.",
             "Native database binary running as engine in instance.",
             "Cloud Spanner with Postgres dialect.",
+            "Cloud Spanner with Postgres dialect.",
             "Other refers to rest of other database engine. This is to be when engine is known, but it is not present in this enum."
           ],
           "type": "string"
@@ -3641,18 +3702,38 @@
           "description": "Type of specific database product. It could be CloudSQL, AlloyDB etc..",
           "enum": [
             "PRODUCT_TYPE_UNSPECIFIED",
+            "PRODUCT_TYPE_CLOUD_SQL",
             "CLOUD_SQL",
+            "PRODUCT_TYPE_ALLOYDB",
             "ALLOYDB",
+            "PRODUCT_TYPE_SPANNER",
             "SPANNER",
+            "PRODUCT_TYPE_ON_PREM",
             "ON_PREM",
             "PRODUCT_TYPE_OTHER"
           ],
+          "enumDeprecated": [
+            false,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false,
+            true,
+            false
+          ],
           "enumDescriptions": [
             "UNSPECIFIED means product type is not known or available.",
             "Cloud SQL product area in GCP",
+            "Cloud SQL product area in GCP",
             "AlloyDB product area in GCP",
+            "AlloyDB product area in GCP",
+            "Spanner product area in GCP",
             "Spanner product area in GCP",
             "On premises database product.",
+            "On premises database product.",
             "Other refers to rest of other product type. This is to be when product type is known, but it is not present in this enum."
           ],
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
index 6a3335cb5bc..56f58aa188f 100644
--- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
@@ -4298,7 +4298,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://analyticsadmin.googleapis.com/",
   "schemas": {
     "GoogleAnalyticsAdminV1alphaAccessBetweenFilter": {
diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json
index 46ee4bd3d12..d2660a3e31e 100644
--- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json
@@ -1628,7 +1628,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://analyticsadmin.googleapis.com/",
   "schemas": {
     "GoogleAnalyticsAdminV1betaAccessBetweenFilter": {
diff --git a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json
index 32db0fa738a..698cdd5fb8f 100644
--- a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json
@@ -440,7 +440,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://analyticsdata.googleapis.com/",
   "schemas": {
     "ActiveMetricRestriction": {
diff --git a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
index de02d21e1b6..ed5fd708c3c 100644
--- a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
+++ b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
@@ -851,7 +851,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231211",
   "rootUrl": "https://androiddeviceprovisioning.googleapis.com/",
   "schemas": {
     "ClaimDeviceRequest": {
diff --git a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json
index e850b152e36..bb7be91d4f8 100644
--- a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json
+++ b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json
@@ -2649,7 +2649,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231210",
   "rootUrl": "https://androidenterprise.googleapis.com/",
   "schemas": {
     "Administrator": {
diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json
index 63baf83e9a0..d1c6991ebed 100644
--- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json
+++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json
@@ -2132,6 +2132,87 @@
     },
     "inappproducts": {
       "methods": {
+        "batchDelete": {
+          "description": "Deletes in-app products (managed products or subscriptions). Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput. This method should not be used to delete subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.",
+          "flatPath": "androidpublisher/v3/applications/{packageName}/inappproducts:batchDelete",
+          "httpMethod": "POST",
+          "id": "androidpublisher.inappproducts.batchDelete",
+          "parameterOrder": [
+            "packageName"
+          ],
+          "parameters": {
+            "packageName": {
+              "description": "Package name of the app.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            }
+          },
+          "path": "androidpublisher/v3/applications/{packageName}/inappproducts:batchDelete",
+          "request": {
+            "$ref": "InappproductsBatchDeleteRequest"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/androidpublisher"
+          ]
+        },
+        "batchGet": {
+          "description": "Reads multiple in-app products, which can be managed products or subscriptions. This method should not be used to retrieve subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.",
+          "flatPath": "androidpublisher/v3/applications/{packageName}/inappproducts:batchGet",
+          "httpMethod": "GET",
+          "id": "androidpublisher.inappproducts.batchGet",
+          "parameterOrder": [
+            "packageName"
+          ],
+          "parameters": {
+            "packageName": {
+              "description": "Package name of the app.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "sku": {
+              "description": "Unique identifier for the in-app products.",
+              "location": "query",
+              "repeated": true,
+              "type": "string"
+            }
+          },
+          "path": "androidpublisher/v3/applications/{packageName}/inappproducts:batchGet",
+          "response": {
+            "$ref": "InappproductsBatchGetResponse"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/androidpublisher"
+          ]
+        },
+        "batchUpdate": {
+          "description": "Updates or inserts one or more in-app products (managed products or subscriptions). Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput. This method should no longer be used to update subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.",
+          "flatPath": "androidpublisher/v3/applications/{packageName}/inappproducts:batchUpdate",
+          "httpMethod": "POST",
+          "id": "androidpublisher.inappproducts.batchUpdate",
+          "parameterOrder": [
+            "packageName"
+          ],
+          "parameters": {
+            "packageName": {
+              "description": "Package name of the app.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            }
+          },
+          "path": "androidpublisher/v3/applications/{packageName}/inappproducts:batchUpdate",
+          "request": {
+            "$ref": "InappproductsBatchUpdateRequest"
+          },
+          "response": {
+            "$ref": "InappproductsBatchUpdateResponse"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/androidpublisher"
+          ]
+        },
         "delete": {
           "description": "Deletes an in-app product (a managed product or a subscription). This method should no longer be used to delete subscriptions. See [this article](https://android-developers.googleblog.com/2023/06/changes-to-google-play-developer-api-june-2023.html) for more information.",
           "flatPath": "androidpublisher/v3/applications/{packageName}/inappproducts/{sku}",
@@ -2142,6 +2223,21 @@
             "sku"
           ],
           "parameters": {
+            "latencyTolerance": {
+              "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+              "enum": [
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+              ],
+              "enumDescriptions": [
+                "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+                "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+                "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+              ],
+              "location": "query",
+              "type": "string"
+            },
             "packageName": {
               "description": "Package name of the app.",
               "location": "path",
@@ -2281,6 +2377,21 @@
               "location": "query",
               "type": "boolean"
             },
+            "latencyTolerance": {
+              "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+              "enum": [
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+              ],
+              "enumDescriptions": [
+                "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+                "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+                "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+              ],
+              "location": "query",
+              "type": "string"
+            },
             "packageName": {
               "description": "Package name of the app.",
               "location": "path",
@@ -2325,6 +2436,21 @@
               "location": "query",
               "type": "boolean"
             },
+            "latencyTolerance": {
+              "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+              "enum": [
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+                "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+              ],
+              "enumDescriptions": [
+                "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+                "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+                "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+              ],
+              "location": "query",
+              "type": "string"
+            },
             "packageName": {
               "description": "Package name of the app.",
               "location": "path",
@@ -2505,6 +2631,63 @@
                 "https://www.googleapis.com/auth/androidpublisher"
               ]
             },
+            "batchGet": {
+              "description": "Reads one or more subscriptions.",
+              "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions:batchGet",
+              "httpMethod": "GET",
+              "id": "androidpublisher.monetization.subscriptions.batchGet",
+              "parameterOrder": [
+                "packageName"
+              ],
+              "parameters": {
+                "packageName": {
+                  "description": "Required. The parent app (package name) for which the subscriptions should be retrieved. Must be equal to the package_name field on all the requests.",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                },
+                "productIds": {
+                  "description": "Required. A list of up to 100 subscription product IDs to retrieve. All the IDs must be different.",
+                  "location": "query",
+                  "repeated": true,
+                  "type": "string"
+                }
+              },
+              "path": "androidpublisher/v3/applications/{packageName}/subscriptions:batchGet",
+              "response": {
+                "$ref": "BatchGetSubscriptionsResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/androidpublisher"
+              ]
+            },
+            "batchUpdate": {
+              "description": "Updates a batch of subscriptions. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.",
+              "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions:batchUpdate",
+              "httpMethod": "POST",
+              "id": "androidpublisher.monetization.subscriptions.batchUpdate",
+              "parameterOrder": [
+                "packageName"
+              ],
+              "parameters": {
+                "packageName": {
+                  "description": "Required. The parent app (package name) for which the subscriptions should be updated. Must be equal to the package_name field on all the Subscription resources.",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "androidpublisher/v3/applications/{packageName}/subscriptions:batchUpdate",
+              "request": {
+                "$ref": "BatchUpdateSubscriptionsRequest"
+              },
+              "response": {
+                "$ref": "BatchUpdateSubscriptionsResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/androidpublisher"
+              ]
+            },
             "create": {
               "description": "Creates a new subscription. Newly added base plans will remain in draft state until activated.",
               "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions",
@@ -2651,6 +2834,26 @@
                 "productId"
               ],
               "parameters": {
+                "allowMissing": {
+                  "description": "Optional. If set to true, and the subscription with the given package_name and product_id doesn't exist, the subscription will be created. If a new subscription is created, update_mask is ignored.",
+                  "location": "query",
+                  "type": "boolean"
+                },
+                "latencyTolerance": {
+                  "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+                  "enum": [
+                    "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+                    "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+                    "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+                  ],
+                  "enumDescriptions": [
+                    "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+                    "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+                    "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+                  ],
+                  "location": "query",
+                  "type": "string"
+                },
                 "packageName": {
                   "description": "Immutable. Package name of the parent app.",
                   "location": "path",
@@ -2731,6 +2934,74 @@
                     "https://www.googleapis.com/auth/androidpublisher"
                   ]
                 },
+                "batchMigratePrices": {
+                  "description": "Batch variant of the MigrateBasePlanPrices endpoint. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.",
+                  "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans:batchMigratePrices",
+                  "httpMethod": "POST",
+                  "id": "androidpublisher.monetization.subscriptions.basePlans.batchMigratePrices",
+                  "parameterOrder": [
+                    "packageName",
+                    "productId"
+                  ],
+                  "parameters": {
+                    "packageName": {
+                      "description": "Required. The parent app (package name) for which the subscriptions should be created or updated. Must be equal to the package_name field on all the Subscription resources.",
+                      "location": "path",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "productId": {
+                      "description": "Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this batch update spans multiple subscriptions, set this field to \"-\". Must be set.",
+                      "location": "path",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans:batchMigratePrices",
+                  "request": {
+                    "$ref": "BatchMigrateBasePlanPricesRequest"
+                  },
+                  "response": {
+                    "$ref": "BatchMigrateBasePlanPricesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/androidpublisher"
+                  ]
+                },
+                "batchUpdateStates": {
+                  "description": "Activates or deactivates base plans across one or multiple subscriptions. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.",
+                  "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans:batchUpdateStates",
+                  "httpMethod": "POST",
+                  "id": "androidpublisher.monetization.subscriptions.basePlans.batchUpdateStates",
+                  "parameterOrder": [
+                    "packageName",
+                    "productId"
+                  ],
+                  "parameters": {
+                    "packageName": {
+                      "description": "Required. The parent app (package name) of the updated base plans.",
+                      "location": "path",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "productId": {
+                      "description": "Required. The product ID of the parent subscription, if all updated base plans belong to the same subscription. If this batch update spans multiple subscriptions, set this field to \"-\". Must be set.",
+                      "location": "path",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans:batchUpdateStates",
+                  "request": {
+                    "$ref": "BatchUpdateBasePlanStatesRequest"
+                  },
+                  "response": {
+                    "$ref": "BatchUpdateBasePlanStatesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/androidpublisher"
+                  ]
+                },
                 "deactivate": {
                   "description": "Deactivates a base plan. Once deactivated, the base plan will become unavailable to new subscribers, but existing subscribers will maintain their subscription",
                   "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}:deactivate",
@@ -2900,6 +3171,129 @@
                         "https://www.googleapis.com/auth/androidpublisher"
                       ]
                     },
+                    "batchGet": {
+                      "description": "Reads one or more subscription offers.",
+                      "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}/offers:batchGet",
+                      "httpMethod": "POST",
+                      "id": "androidpublisher.monetization.subscriptions.basePlans.offers.batchGet",
+                      "parameterOrder": [
+                        "packageName",
+                        "productId",
+                        "basePlanId"
+                      ],
+                      "parameters": {
+                        "basePlanId": {
+                          "description": "Required. The parent base plan (ID) for which the offers should be read. May be specified as '-' to read offers from multiple base plans.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        },
+                        "packageName": {
+                          "description": "Required. The parent app (package name) for which the subscriptions should be created or updated. Must be equal to the package_name field on all the requests.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        },
+                        "productId": {
+                          "description": "Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this request spans multiple subscriptions, set this field to \"-\". Must be set.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        }
+                      },
+                      "path": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}/offers:batchGet",
+                      "request": {
+                        "$ref": "BatchGetSubscriptionOffersRequest"
+                      },
+                      "response": {
+                        "$ref": "BatchGetSubscriptionOffersResponse"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/androidpublisher"
+                      ]
+                    },
+                    "batchUpdate": {
+                      "description": "Updates a batch of subscription offers. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.",
+                      "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}/offers:batchUpdate",
+                      "httpMethod": "POST",
+                      "id": "androidpublisher.monetization.subscriptions.basePlans.offers.batchUpdate",
+                      "parameterOrder": [
+                        "packageName",
+                        "productId",
+                        "basePlanId"
+                      ],
+                      "parameters": {
+                        "basePlanId": {
+                          "description": "Required. The parent base plan (ID) for which the offers should be updated. May be specified as '-' to update offers from multiple base plans.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        },
+                        "packageName": {
+                          "description": "Required. The parent app (package name) of the updated subscription offers. Must be equal to the package_name field on all the updated SubscriptionOffer resources.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        },
+                        "productId": {
+                          "description": "Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this request spans multiple subscriptions, set this field to \"-\". Must be set.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        }
+                      },
+                      "path": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}/offers:batchUpdate",
+                      "request": {
+                        "$ref": "BatchUpdateSubscriptionOffersRequest"
+                      },
+                      "response": {
+                        "$ref": "BatchUpdateSubscriptionOffersResponse"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/androidpublisher"
+                      ]
+                    },
+                    "batchUpdateStates": {
+                      "description": "Updates a batch of subscription offer states. Set the latencyTolerance field on nested requests to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT to achieve maximum update throughput.",
+                      "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}/offers:batchUpdateStates",
+                      "httpMethod": "POST",
+                      "id": "androidpublisher.monetization.subscriptions.basePlans.offers.batchUpdateStates",
+                      "parameterOrder": [
+                        "packageName",
+                        "productId",
+                        "basePlanId"
+                      ],
+                      "parameters": {
+                        "basePlanId": {
+                          "description": "Required. The parent base plan (ID) for which the offers should be updated. May be specified as '-' to update offers from multiple base plans.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        },
+                        "packageName": {
+                          "description": "Required. The parent app (package name) of the updated subscription offers. Must be equal to the package_name field on all the updated SubscriptionOffer resources.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        },
+                        "productId": {
+                          "description": "Required. The product ID of the parent subscription, if all updated offers belong to the same subscription. If this request spans multiple subscriptions, set this field to \"-\". Must be set.",
+                          "location": "path",
+                          "required": true,
+                          "type": "string"
+                        }
+                      },
+                      "path": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}/offers:batchUpdateStates",
+                      "request": {
+                        "$ref": "BatchUpdateSubscriptionOfferStatesRequest"
+                      },
+                      "response": {
+                        "$ref": "BatchUpdateSubscriptionOfferStatesResponse"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/androidpublisher"
+                      ]
+                    },
                     "create": {
                       "description": "Creates a new subscription offer. Only auto-renewing base plans can have subscription offers. The offer state will be DRAFT until it is activated.",
                       "flatPath": "androidpublisher/v3/applications/{packageName}/subscriptions/{productId}/basePlans/{basePlanId}/offers",
@@ -3147,12 +3541,32 @@
                         "offerId"
                       ],
                       "parameters": {
+                        "allowMissing": {
+                          "description": "Optional. If set to true, and the subscription offer with the given package_name, product_id, base_plan_id and offer_id doesn't exist, an offer will be created. If a new offer is created, update_mask is ignored.",
+                          "location": "query",
+                          "type": "boolean"
+                        },
                         "basePlanId": {
                           "description": "Required. Immutable. The ID of the base plan to which this offer is an extension.",
                           "location": "path",
                           "required": true,
                           "type": "string"
                         },
+                        "latencyTolerance": {
+                          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+                          "enum": [
+                            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+                            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+                            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+                          ],
+                          "enumDescriptions": [
+                            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+                            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+                            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+                          ],
+                          "location": "query",
+                          "type": "string"
+                        },
                         "offerId": {
                           "description": "Required. Immutable. Unique ID of this subscription offer. Must be unique within the base plan.",
                           "location": "path",
@@ -4081,7 +4495,7 @@
       }
     }
   },
-  "revision": "20231207",
+  "revision": "20231212",
   "rootUrl": "https://androidpublisher.googleapis.com/",
   "schemas": {
     "Abi": {
@@ -4148,13 +4562,71 @@
     "ActivateBasePlanRequest": {
       "description": "Request message for ActivateBasePlan.",
       "id": "ActivateBasePlanRequest",
-      "properties": {},
+      "properties": {
+        "basePlanId": {
+          "description": "Required. The unique base plan ID of the base plan to activate.",
+          "type": "string"
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Required. The parent app (package name) of the base plan to activate.",
+          "type": "string"
+        },
+        "productId": {
+          "description": "Required. The parent subscription (ID) of the base plan to activate.",
+          "type": "string"
+        }
+      },
       "type": "object"
     },
     "ActivateSubscriptionOfferRequest": {
       "description": "Request message for ActivateSubscriptionOffer.",
       "id": "ActivateSubscriptionOfferRequest",
-      "properties": {},
+      "properties": {
+        "basePlanId": {
+          "description": "Required. The parent base plan (ID) of the offer to activate.",
+          "type": "string"
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "offerId": {
+          "description": "Required. The unique offer ID of the offer to activate.",
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Required. The parent app (package name) of the offer to activate.",
+          "type": "string"
+        },
+        "productId": {
+          "description": "Required. The parent subscription (ID) of the offer to activate.",
+          "type": "string"
+        }
+      },
       "type": "object"
     },
     "Apk": {
@@ -4450,71 +4922,252 @@
       },
       "type": "object"
     },
-    "AutoRenewingPlan": {
-      "description": "Information related to an auto renewing plan.",
-      "id": "AutoRenewingPlan",
+    "AutoRenewingPlan": {
+      "description": "Information related to an auto renewing plan.",
+      "id": "AutoRenewingPlan",
+      "properties": {
+        "autoRenewEnabled": {
+          "description": "If the subscription is currently set to auto-renew, e.g. the user has not canceled the subscription",
+          "type": "boolean"
+        },
+        "priceChangeDetails": {
+          "$ref": "SubscriptionItemPriceChangeDetails",
+          "description": "The information of the last price change for the item since subscription signup."
+        }
+      },
+      "type": "object"
+    },
+    "BasePlan": {
+      "description": "A single base plan for a subscription.",
+      "id": "BasePlan",
+      "properties": {
+        "autoRenewingBasePlanType": {
+          "$ref": "AutoRenewingBasePlanType",
+          "description": "Set when the base plan automatically renews at a regular interval."
+        },
+        "basePlanId": {
+          "description": "Required. Immutable. The unique identifier of this base plan. Must be unique within the subscription, and conform with RFC-1034. That is, this ID can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 63 characters.",
+          "type": "string"
+        },
+        "offerTags": {
+          "description": "List of up to 20 custom tags specified for this base plan, and returned to the app through the billing library. Subscription offers for this base plan will also receive these offer tags in the billing library.",
+          "items": {
+            "$ref": "OfferTag"
+          },
+          "type": "array"
+        },
+        "otherRegionsConfig": {
+          "$ref": "OtherRegionsBasePlanConfig",
+          "description": "Pricing information for any new locations Play may launch in the future. If omitted, the BasePlan will not be automatically available in any new locations Play may launch in the future."
+        },
+        "prepaidBasePlanType": {
+          "$ref": "PrepaidBasePlanType",
+          "description": "Set when the base plan does not automatically renew at the end of the billing period."
+        },
+        "regionalConfigs": {
+          "description": "Region-specific information for this base plan.",
+          "items": {
+            "$ref": "RegionalBasePlanConfig"
+          },
+          "type": "array"
+        },
+        "state": {
+          "description": "Output only. The state of the base plan, i.e. whether it's active. Draft and inactive base plans can be activated or deleted. Active base plans can be made inactive. Inactive base plans can be canceled. This field cannot be changed by updating the resource. Use the dedicated endpoints instead.",
+          "enum": [
+            "STATE_UNSPECIFIED",
+            "DRAFT",
+            "ACTIVE",
+            "INACTIVE"
+          ],
+          "enumDescriptions": [
+            "Unspecified state.",
+            "The base plan is currently in a draft state, and hasn't been activated. It can be safely deleted at this point.",
+            "The base plan is active and available for new subscribers.",
+            "The base plan is inactive and only available for existing subscribers."
+          ],
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "BatchGetSubscriptionOffersRequest": {
+      "description": "Request message for BatchGetSubscriptionOffers endpoint.",
+      "id": "BatchGetSubscriptionOffersRequest",
+      "properties": {
+        "requests": {
+          "description": "Required. A list of get requests of up to 100 elements. All requests must retrieve offers from different subscriptions.",
+          "items": {
+            "$ref": "GetSubscriptionOfferRequest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchGetSubscriptionOffersResponse": {
+      "description": "Response message for BatchGetSubscriptionOffers endpoint.",
+      "id": "BatchGetSubscriptionOffersResponse",
+      "properties": {
+        "subscriptionOffers": {
+          "items": {
+            "$ref": "SubscriptionOffer"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchGetSubscriptionsResponse": {
+      "description": "Response message for BatchGetSubscriptions endpoint.",
+      "id": "BatchGetSubscriptionsResponse",
+      "properties": {
+        "subscriptions": {
+          "description": "The list of requested subscriptions, in the same order as the request.",
+          "items": {
+            "$ref": "Subscription"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchMigrateBasePlanPricesRequest": {
+      "description": "Request message for BatchMigrateBasePlanPrices.",
+      "id": "BatchMigrateBasePlanPricesRequest",
+      "properties": {
+        "requests": {
+          "description": "Required. Up to 100 price migration requests. All requests must update different base plans.",
+          "items": {
+            "$ref": "MigrateBasePlanPricesRequest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchMigrateBasePlanPricesResponse": {
+      "description": "Response message for BatchMigrateBasePlanPrices.",
+      "id": "BatchMigrateBasePlanPricesResponse",
+      "properties": {
+        "responses": {
+          "description": "Contains one response per requested price migration, in the same order as the request.",
+          "items": {
+            "$ref": "MigrateBasePlanPricesResponse"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchUpdateBasePlanStatesRequest": {
+      "description": "Request message for BatchUpdateBasePlanStates.",
+      "id": "BatchUpdateBasePlanStatesRequest",
+      "properties": {
+        "requests": {
+          "description": "Required. The update request list of up to 100 elements. All requests must update different base plans.",
+          "items": {
+            "$ref": "UpdateBasePlanStateRequest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchUpdateBasePlanStatesResponse": {
+      "description": "Response message for BatchUpdateBasePlanStates.",
+      "id": "BatchUpdateBasePlanStatesResponse",
+      "properties": {
+        "subscriptions": {
+          "description": "The list of updated subscriptions. This list will match the requests one to one, in the same order.",
+          "items": {
+            "$ref": "Subscription"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchUpdateSubscriptionOfferStatesRequest": {
+      "description": "Request message for BatchUpdateSubscriptionOfferStates.",
+      "id": "BatchUpdateSubscriptionOfferStatesRequest",
+      "properties": {
+        "requests": {
+          "description": "Required. The update request list of up to 100 elements. All requests must update different offers.",
+          "items": {
+            "$ref": "UpdateSubscriptionOfferStateRequest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchUpdateSubscriptionOfferStatesResponse": {
+      "description": "Response message for BatchUpdateSubscriptionOfferStates.",
+      "id": "BatchUpdateSubscriptionOfferStatesResponse",
+      "properties": {
+        "subscriptionOffers": {
+          "description": "The updated subscription offers list.",
+          "items": {
+            "$ref": "SubscriptionOffer"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchUpdateSubscriptionOffersRequest": {
+      "description": "Request message for BatchUpdateSubscriptionOffers.",
+      "id": "BatchUpdateSubscriptionOffersRequest",
+      "properties": {
+        "requests": {
+          "description": "Required. A list of update requests of up to 100 elements. All requests must update different subscription offers.",
+          "items": {
+            "$ref": "UpdateSubscriptionOfferRequest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "BatchUpdateSubscriptionOffersResponse": {
+      "description": "Response message for BatchUpdateSubscriptionOffers.",
+      "id": "BatchUpdateSubscriptionOffersResponse",
       "properties": {
-        "autoRenewEnabled": {
-          "description": "If the subscription is currently set to auto-renew, e.g. the user has not canceled the subscription",
-          "type": "boolean"
-        },
-        "priceChangeDetails": {
-          "$ref": "SubscriptionItemPriceChangeDetails",
-          "description": "The information of the last price change for the item since subscription signup."
+        "subscriptionOffers": {
+          "description": "The updated subscription offers list.",
+          "items": {
+            "$ref": "SubscriptionOffer"
+          },
+          "type": "array"
         }
       },
       "type": "object"
     },
-    "BasePlan": {
-      "description": "A single base plan for a subscription.",
-      "id": "BasePlan",
+    "BatchUpdateSubscriptionsRequest": {
+      "description": "Request message for BatchUpdateSubscription.",
+      "id": "BatchUpdateSubscriptionsRequest",
       "properties": {
-        "autoRenewingBasePlanType": {
-          "$ref": "AutoRenewingBasePlanType",
-          "description": "Set when the base plan automatically renews at a regular interval."
-        },
-        "basePlanId": {
-          "description": "Required. Immutable. The unique identifier of this base plan. Must be unique within the subscription, and conform with RFC-1034. That is, this ID can only contain lower-case letters (a-z), numbers (0-9), and hyphens (-), and be at most 63 characters.",
-          "type": "string"
-        },
-        "offerTags": {
-          "description": "List of up to 20 custom tags specified for this base plan, and returned to the app through the billing library. Subscription offers for this base plan will also receive these offer tags in the billing library.",
+        "requests": {
+          "description": "Required. A list of update requests of up to 100 elements. All requests must update different subscriptions.",
           "items": {
-            "$ref": "OfferTag"
+            "$ref": "UpdateSubscriptionRequest"
           },
           "type": "array"
-        },
-        "otherRegionsConfig": {
-          "$ref": "OtherRegionsBasePlanConfig",
-          "description": "Pricing information for any new locations Play may launch in the future. If omitted, the BasePlan will not be automatically available any new locations Play may launch in the future."
-        },
-        "prepaidBasePlanType": {
-          "$ref": "PrepaidBasePlanType",
-          "description": "Set when the base plan does not automatically renew at the end of the billing period."
-        },
-        "regionalConfigs": {
-          "description": "Region-specific information for this base plan.",
+        }
+      },
+      "type": "object"
+    },
+    "BatchUpdateSubscriptionsResponse": {
+      "description": "Response message for BatchUpdateSubscription.",
+      "id": "BatchUpdateSubscriptionsResponse",
+      "properties": {
+        "subscriptions": {
+          "description": "The updated subscriptions list.",
           "items": {
-            "$ref": "RegionalBasePlanConfig"
+            "$ref": "Subscription"
           },
           "type": "array"
-        },
-        "state": {
-          "description": "Output only. The state of the base plan, i.e. whether it's active. Draft and inactive base plans can be activated or deleted. Active base plans can be made inactive. Inactive base plans can be canceled. This field cannot be changed by updating the resource. Use the dedicated endpoints instead.",
-          "enum": [
-            "STATE_UNSPECIFIED",
-            "DRAFT",
-            "ACTIVE",
-            "INACTIVE"
-          ],
-          "enumDescriptions": [
-            "Unspecified state.",
-            "The base plan is currently in a draft state, and hasn't been activated. It can be safely deleted at this point.",
-            "The base plan is active and available for new subscribers.",
-            "The base plan is inactive and only available for existing subscribers."
-          ],
-          "readOnly": true,
-          "type": "string"
         }
       },
       "type": "object"
@@ -4710,13 +5363,71 @@
     "DeactivateBasePlanRequest": {
       "description": "Request message for DeactivateBasePlan.",
       "id": "DeactivateBasePlanRequest",
-      "properties": {},
+      "properties": {
+        "basePlanId": {
+          "description": "Required. The unique base plan ID of the base plan to deactivate.",
+          "type": "string"
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Required. The parent app (package name) of the base plan to deactivate.",
+          "type": "string"
+        },
+        "productId": {
+          "description": "Required. The parent subscription (ID) of the base plan to deactivate.",
+          "type": "string"
+        }
+      },
       "type": "object"
     },
     "DeactivateSubscriptionOfferRequest": {
       "description": "Request message for DeactivateSubscriptionOffer.",
       "id": "DeactivateSubscriptionOfferRequest",
-      "properties": {},
+      "properties": {
+        "basePlanId": {
+          "description": "Required. The parent base plan (ID) of the offer to deactivate.",
+          "type": "string"
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "offerId": {
+          "description": "Required. The unique offer ID of the offer to deactivate.",
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Required. The parent app (package name) of the offer to deactivate.",
+          "type": "string"
+        },
+        "productId": {
+          "description": "Required. The parent subscription (ID) of the offer to deactivate.",
+          "type": "string"
+        }
+      },
       "type": "object"
     },
     "DeferredItemReplacement": {
@@ -5430,6 +6141,29 @@
       },
       "type": "object"
     },
+    "GetSubscriptionOfferRequest": {
+      "description": "Request message for GetSubscriptionOffer.",
+      "id": "GetSubscriptionOfferRequest",
+      "properties": {
+        "basePlanId": {
+          "description": "Required. The parent base plan (ID) of the offer to get.",
+          "type": "string"
+        },
+        "offerId": {
+          "description": "Required. The unique offer ID of the offer to get.",
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Required. The parent app (package name) of the offer to get.",
+          "type": "string"
+        },
+        "productId": {
+          "description": "Required. The parent subscription (ID) of the offer to get.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "Grant": {
       "description": "An access grant resource.",
       "id": "Grant",
@@ -5669,6 +6403,91 @@
       },
       "type": "object"
     },
+    "InappproductsBatchDeleteRequest": {
+      "description": "Request to delete multiple in-app products.",
+      "id": "InappproductsBatchDeleteRequest",
+      "properties": {
+        "requests": {
+          "description": "Individual delete requests. At least one request is required. Can contain up to 100 requests. All requests must correspond to different in-app products.",
+          "items": {
+            "$ref": "InappproductsDeleteRequest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "InappproductsBatchGetResponse": {
+      "description": "Response message for the in-app products BatchGet endpoint.",
+      "id": "InappproductsBatchGetResponse",
+      "properties": {
+        "inappproduct": {
+          "description": "The list of requested in-app products, in the same order as the request.",
+          "items": {
+            "$ref": "InAppProduct"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "InappproductsBatchUpdateRequest": {
+      "description": "Request to update or insert one or more in-app products.",
+      "id": "InappproductsBatchUpdateRequest",
+      "properties": {
+        "requests": {
+          "description": "Required. Individual update requests. At least one request is required. Can contain up to 100 requests. All requests must correspond to different in-app products.",
+          "items": {
+            "$ref": "InappproductsUpdateRequest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "InappproductsBatchUpdateResponse": {
+      "description": "Response for a batch in-app product update.",
+      "id": "InappproductsBatchUpdateResponse",
+      "properties": {
+        "inappproducts": {
+          "description": "The updated or inserted in-app products.",
+          "items": {
+            "$ref": "InAppProduct"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "InappproductsDeleteRequest": {
+      "description": "Request to delete an in-app product.",
+      "id": "InappproductsDeleteRequest",
+      "properties": {
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Package name of the app.",
+          "type": "string"
+        },
+        "sku": {
+          "description": "Unique identifier for the in-app product.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "InappproductsListResponse": {
       "description": "Response listing all in-app products.",
       "id": "InappproductsListResponse",
@@ -5696,6 +6515,47 @@
       },
       "type": "object"
     },
+    "InappproductsUpdateRequest": {
+      "description": "Request to update an in-app product.",
+      "id": "InappproductsUpdateRequest",
+      "properties": {
+        "allowMissing": {
+          "description": "If set to true, and the in-app product with the given package_name and sku doesn't exist, the in-app product will be created.",
+          "type": "boolean"
+        },
+        "autoConvertMissingPrices": {
+          "description": "If true the prices for all regions targeted by the parent app that don't have a price specified for this in-app product will be auto converted to the target currency based on the default price. Defaults to false.",
+          "type": "boolean"
+        },
+        "inappproduct": {
+          "$ref": "InAppProduct",
+          "description": "The new in-app product."
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Package name of the app.",
+          "type": "string"
+        },
+        "sku": {
+          "description": "Unique identifier for the in-app product.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "InternalAppSharingArtifact": {
       "description": "An artifact resource which gets created when uploading an APK or Android App Bundle through internal app sharing.",
       "id": "InternalAppSharingArtifact",
@@ -5929,6 +6789,32 @@
       "description": "Request message for MigrateBasePlanPrices.",
       "id": "MigrateBasePlanPricesRequest",
       "properties": {
+        "basePlanId": {
+          "description": "Required. The unique base plan ID of the base plan to update prices on.",
+          "type": "string"
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "packageName": {
+          "description": "Required. Package name of the parent app. Must be equal to the package_name field on the Subscription resource.",
+          "type": "string"
+        },
+        "productId": {
+          "description": "Required. The ID of the subscription to update. Must be equal to the product_id field on the Subscription resource.",
+          "type": "string"
+        },
         "regionalPriceMigrations": {
           "description": "Required. The regional prices to update.",
           "items": {
@@ -7787,6 +8673,112 @@
       },
       "type": "object"
     },
+    "UpdateBasePlanStateRequest": {
+      "description": "Request message to update the state of a subscription base plan.",
+      "id": "UpdateBasePlanStateRequest",
+      "properties": {
+        "activateBasePlanRequest": {
+          "$ref": "ActivateBasePlanRequest",
+          "description": "Activates a base plan. Once activated, base plans will be available to new subscribers."
+        },
+        "deactivateBasePlanRequest": {
+          "$ref": "DeactivateBasePlanRequest",
+          "description": "Deactivates a base plan. Once deactivated, the base plan will become unavailable to new subscribers, but existing subscribers will maintain their subscription"
+        }
+      },
+      "type": "object"
+    },
+    "UpdateSubscriptionOfferRequest": {
+      "description": "Request message for UpdateSubscriptionOffer.",
+      "id": "UpdateSubscriptionOfferRequest",
+      "properties": {
+        "allowMissing": {
+          "description": "Optional. If set to true, and the subscription offer with the given package_name, product_id, base_plan_id and offer_id doesn't exist, an offer will be created. If a new offer is created, update_mask is ignored.",
+          "type": "boolean"
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "regionsVersion": {
+          "$ref": "RegionsVersion",
+          "description": "Required. The version of the available regions being used for the subscription_offer."
+        },
+        "subscriptionOffer": {
+          "$ref": "SubscriptionOffer",
+          "description": "Required. The subscription offer to update."
+        },
+        "updateMask": {
+          "description": "Required. The list of fields to be updated.",
+          "format": "google-fieldmask",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "UpdateSubscriptionOfferStateRequest": {
+      "description": "Request message to update the state of a subscription offer.",
+      "id": "UpdateSubscriptionOfferStateRequest",
+      "properties": {
+        "activateSubscriptionOfferRequest": {
+          "$ref": "ActivateSubscriptionOfferRequest",
+          "description": "Activates an offer. Once activated, the offer will be available to new subscribers."
+        },
+        "deactivateSubscriptionOfferRequest": {
+          "$ref": "DeactivateSubscriptionOfferRequest",
+          "description": "Deactivates an offer. Once deactivated, the offer will become unavailable to new subscribers, but existing subscribers will maintain their subscription"
+        }
+      },
+      "type": "object"
+    },
+    "UpdateSubscriptionRequest": {
+      "description": "Request message for UpdateSubscription.",
+      "id": "UpdateSubscriptionRequest",
+      "properties": {
+        "allowMissing": {
+          "description": "Optional. If set to true, and the subscription with the given package_name and product_id doesn't exist, the subscription will be created. If a new subscription is created, update_mask is ignored.",
+          "type": "boolean"
+        },
+        "latencyTolerance": {
+          "description": "Optional. The latency tolerance for the propagation of this product update. Defaults to latency-sensitive.",
+          "enum": [
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_UNSPECIFIED",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE",
+            "PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_TOLERANT"
+          ],
+          "enumDescriptions": [
+            "Defaults to PRODUCT_UPDATE_LATENCY_TOLERANCE_LATENCY_SENSITIVE.",
+            "The update will propagate to clients within several minutes on average and up to a few hours in rare cases. Throughput is limited to 7,200 updates per app per hour.",
+            "The update will propagate to clients within 24 hours. Supports high throughput of up to 720,000 updates per app per hour using batch modification methods."
+          ],
+          "type": "string"
+        },
+        "regionsVersion": {
+          "$ref": "RegionsVersion",
+          "description": "Required. The version of the available regions being used for the subscription."
+        },
+        "subscription": {
+          "$ref": "Subscription",
+          "description": "Required. The subscription to update."
+        },
+        "updateMask": {
+          "description": "Required. The list of fields to be updated.",
+          "format": "google-fieldmask",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "UpgradeTargetingRule": {
       "description": "Represents a targeting rule of the form: User currently has {scope} [with billing period {billing_period}].",
       "id": "UpgradeTargetingRule",
diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1.json b/googleapiclient/discovery_cache/documents/apigateway.v1.json
index 1eb0c058342..c1d3ba80c8f 100644
--- a/googleapiclient/discovery_cache/documents/apigateway.v1.json
+++ b/googleapiclient/discovery_cache/documents/apigateway.v1.json
@@ -1083,7 +1083,7 @@
       }
     }
   },
-  "revision": "20231101",
+  "revision": "20231129",
   "rootUrl": "https://apigateway.googleapis.com/",
   "schemas": {
     "ApigatewayApi": {
diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json
index 0c2b7e13153..87870cc98e9 100644
--- a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json
@@ -1083,7 +1083,7 @@
       }
     }
   },
-  "revision": "20231101",
+  "revision": "20231129",
   "rootUrl": "https://apigateway.googleapis.com/",
   "schemas": {
     "ApigatewayApi": {
diff --git a/googleapiclient/discovery_cache/documents/apigeeregistry.v1.json b/googleapiclient/discovery_cache/documents/apigeeregistry.v1.json
index 1b93d8e7ac0..c69afca917c 100644
--- a/googleapiclient/discovery_cache/documents/apigeeregistry.v1.json
+++ b/googleapiclient/discovery_cache/documents/apigeeregistry.v1.json
@@ -3272,7 +3272,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231204",
   "rootUrl": "https://apigeeregistry.googleapis.com/",
   "schemas": {
     "Api": {
diff --git a/googleapiclient/discovery_cache/documents/apikeys.v2.json b/googleapiclient/discovery_cache/documents/apikeys.v2.json
index 724a36f5903..3f671e3af71 100644
--- a/googleapiclient/discovery_cache/documents/apikeys.v2.json
+++ b/googleapiclient/discovery_cache/documents/apikeys.v2.json
@@ -396,7 +396,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231210",
   "rootUrl": "https://apikeys.googleapis.com/",
   "schemas": {
     "Operation": {
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json
index 6c015118e6a..75e55a0f4b6 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1.json
@@ -1651,7 +1651,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231204",
   "rootUrl": "https://appengine.googleapis.com/",
   "schemas": {
     "ApiConfigHandler": {
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
index 5e9385d54e8..a6448225db9 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
@@ -887,7 +887,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231204",
   "rootUrl": "https://appengine.googleapis.com/",
   "schemas": {
     "AuthorizedCertificate": {
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json
index 2f8c226282c..ab3a9a83860 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json
@@ -1859,7 +1859,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231204",
   "rootUrl": "https://appengine.googleapis.com/",
   "schemas": {
     "ApiConfigHandler": {
diff --git a/googleapiclient/discovery_cache/documents/apphub.v1.json b/googleapiclient/discovery_cache/documents/apphub.v1.json
new file mode 100644
index 00000000000..970ad43a3eb
--- /dev/null
+++ b/googleapiclient/discovery_cache/documents/apphub.v1.json
@@ -0,0 +1,719 @@
+{
+  "auth": {
+    "oauth2": {
+      "scopes": {
+        "https://www.googleapis.com/auth/cloud-platform": {
+          "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account."
+        }
+      }
+    }
+  },
+  "basePath": "",
+  "baseUrl": "https://apphub.googleapis.com/",
+  "batchPath": "batch",
+  "canonicalName": "App Hub",
+  "description": "",
+  "discoveryVersion": "v1",
+  "documentationLink": "https://cloud.google.com/app-hub/docs/",
+  "fullyEncodeReservedExpansion": true,
+  "icons": {
+    "x16": "http://www.google.com/images/icons/product/search-16.gif",
+    "x32": "http://www.google.com/images/icons/product/search-32.gif"
+  },
+  "id": "apphub:v1",
+  "kind": "discovery#restDescription",
+  "mtlsRootUrl": "https://apphub.mtls.googleapis.com/",
+  "name": "apphub",
+  "ownerDomain": "google.com",
+  "ownerName": "Google",
+  "parameters": {
+    "$.xgafv": {
+      "description": "V1 error format.",
+      "enum": [
+        "1",
+        "2"
+      ],
+      "enumDescriptions": [
+        "v1 error format",
+        "v2 error format"
+      ],
+      "location": "query",
+      "type": "string"
+    },
+    "access_token": {
+      "description": "OAuth access token.",
+      "location": "query",
+      "type": "string"
+    },
+    "alt": {
+      "default": "json",
+      "description": "Data format for response.",
+      "enum": [
+        "json",
+        "media",
+        "proto"
+      ],
+      "enumDescriptions": [
+        "Responses with Content-Type of application/json",
+        "Media download with context-dependent Content-Type",
+        "Responses with Content-Type of application/x-protobuf"
+      ],
+      "location": "query",
+      "type": "string"
+    },
+    "callback": {
+      "description": "JSONP",
+      "location": "query",
+      "type": "string"
+    },
+    "fields": {
+      "description": "Selector specifying which fields to include in a partial response.",
+      "location": "query",
+      "type": "string"
+    },
+    "key": {
+      "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+      "location": "query",
+      "type": "string"
+    },
+    "oauth_token": {
+      "description": "OAuth 2.0 token for the current user.",
+      "location": "query",
+      "type": "string"
+    },
+    "prettyPrint": {
+      "default": "true",
+      "description": "Returns response with indentations and line breaks.",
+      "location": "query",
+      "type": "boolean"
+    },
+    "quotaUser": {
+      "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
+      "location": "query",
+      "type": "string"
+    },
+    "uploadType": {
+      "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
+      "location": "query",
+      "type": "string"
+    },
+    "upload_protocol": {
+      "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
+      "location": "query",
+      "type": "string"
+    }
+  },
+  "protocol": "rest",
+  "resources": {
+    "projects": {
+      "resources": {
+        "locations": {
+          "methods": {
+            "get": {
+              "description": "Gets information about a location.",
+              "flatPath": "v1/projects/{projectsId}/locations/{locationsId}",
+              "httpMethod": "GET",
+              "id": "apphub.projects.locations.get",
+              "parameterOrder": [
+                "name"
+              ],
+              "parameters": {
+                "name": {
+                  "description": "Resource name for the location.",
+                  "location": "path",
+                  "pattern": "^projects/[^/]+/locations/[^/]+$",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1/{+name}",
+              "response": {
+                "$ref": "Location"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            },
+            "list": {
+              "description": "Lists information about the supported locations for this service.",
+              "flatPath": "v1/projects/{projectsId}/locations",
+              "httpMethod": "GET",
+              "id": "apphub.projects.locations.list",
+              "parameterOrder": [
+                "name"
+              ],
+              "parameters": {
+                "filter": {
+                  "description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).",
+                  "location": "query",
+                  "type": "string"
+                },
+                "name": {
+                  "description": "The resource that owns the locations collection, if applicable.",
+                  "location": "path",
+                  "pattern": "^projects/[^/]+$",
+                  "required": true,
+                  "type": "string"
+                },
+                "pageSize": {
+                  "description": "The maximum number of results to return. If not set, the service selects a default.",
+                  "format": "int32",
+                  "location": "query",
+                  "type": "integer"
+                },
+                "pageToken": {
+                  "description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.",
+                  "location": "query",
+                  "type": "string"
+                }
+              },
+              "path": "v1/{+name}/locations",
+              "response": {
+                "$ref": "ListLocationsResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          },
+          "resources": {
+            "applications": {
+              "methods": {
+                "getIamPolicy": {
+                  "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:getIamPolicy",
+                  "httpMethod": "GET",
+                  "id": "apphub.projects.locations.applications.getIamPolicy",
+                  "parameterOrder": [
+                    "resource"
+                  ],
+                  "parameters": {
+                    "options.requestedPolicyVersion": {
+                      "description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "resource": {
+                      "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/applications/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+resource}:getIamPolicy",
+                  "response": {
+                    "$ref": "Policy"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "setIamPolicy": {
+                  "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:setIamPolicy",
+                  "httpMethod": "POST",
+                  "id": "apphub.projects.locations.applications.setIamPolicy",
+                  "parameterOrder": [
+                    "resource"
+                  ],
+                  "parameters": {
+                    "resource": {
+                      "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/applications/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+resource}:setIamPolicy",
+                  "request": {
+                    "$ref": "SetIamPolicyRequest"
+                  },
+                  "response": {
+                    "$ref": "Policy"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "testIamPermissions": {
+                  "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:testIamPermissions",
+                  "httpMethod": "POST",
+                  "id": "apphub.projects.locations.applications.testIamPermissions",
+                  "parameterOrder": [
+                    "resource"
+                  ],
+                  "parameters": {
+                    "resource": {
+                      "description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/applications/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+resource}:testIamPermissions",
+                  "request": {
+                    "$ref": "TestIamPermissionsRequest"
+                  },
+                  "response": {
+                    "$ref": "TestIamPermissionsResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
+            },
+            "operations": {
+              "methods": {
+                "cancel": {
+                  "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel",
+                  "httpMethod": "POST",
+                  "id": "apphub.projects.locations.operations.cancel",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "The name of the operation resource to be cancelled.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}:cancel",
+                  "request": {
+                    "$ref": "CancelOperationRequest"
+                  },
+                  "response": {
+                    "$ref": "Empty"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "delete": {
+                  "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
+                  "httpMethod": "DELETE",
+                  "id": "apphub.projects.locations.operations.delete",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "The name of the operation resource to be deleted.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "Empty"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "get": {
+                  "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
+                  "httpMethod": "GET",
+                  "id": "apphub.projects.locations.operations.get",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "The name of the operation resource.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "list": {
+                  "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations",
+                  "httpMethod": "GET",
+                  "id": "apphub.projects.locations.operations.list",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "filter": {
+                      "description": "The standard list filter.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "name": {
+                      "description": "The name of the operation's parent resource.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "pageSize": {
+                      "description": "The standard list page size.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "The standard list page token.",
+                      "location": "query",
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}/operations",
+                  "response": {
+                    "$ref": "ListOperationsResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  },
+  "revision": "20231208",
+  "rootUrl": "https://apphub.googleapis.com/",
+  "schemas": {
+    "AuditConfig": {
+      "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.",
+      "id": "AuditConfig",
+      "properties": {
+        "auditLogConfigs": {
+          "description": "The configuration for logging of each type of permission.",
+          "items": {
+            "$ref": "AuditLogConfig"
+          },
+          "type": "array"
+        },
+        "service": {
+          "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "AuditLogConfig": {
+      "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.",
+      "id": "AuditLogConfig",
+      "properties": {
+        "exemptedMembers": {
+          "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "logType": {
+          "description": "The log type that this config enables.",
+          "enum": [
+            "LOG_TYPE_UNSPECIFIED",
+            "ADMIN_READ",
+            "DATA_WRITE",
+            "DATA_READ"
+          ],
+          "enumDescriptions": [
+            "Default case. Should never be this.",
+            "Admin reads. Example: CloudIAM getIamPolicy",
+            "Data writes. Example: CloudSQL Users create",
+            "Data reads. Example: CloudSQL Users list"
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "Binding": {
+      "description": "Associates `members`, or principals, with a `role`.",
+      "id": "Binding",
+      "properties": {
+        "condition": {
+          "$ref": "Expr",
+          "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)."
+        },
+        "members": {
+          "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "role": {
+          "description": "Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CancelOperationRequest": {
+      "description": "The request message for Operations.CancelOperation.",
+      "id": "CancelOperationRequest",
+      "properties": {},
+      "type": "object"
+    },
+    "Empty": {
+      "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }",
+      "id": "Empty",
+      "properties": {},
+      "type": "object"
+    },
+    "Expr": {
+      "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.",
+      "id": "Expr",
+      "properties": {
+        "description": {
+          "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.",
+          "type": "string"
+        },
+        "expression": {
+          "description": "Textual representation of an expression in Common Expression Language syntax.",
+          "type": "string"
+        },
+        "location": {
+          "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.",
+          "type": "string"
+        },
+        "title": {
+          "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "ListLocationsResponse": {
+      "description": "The response message for Locations.ListLocations.",
+      "id": "ListLocationsResponse",
+      "properties": {
+        "locations": {
+          "description": "A list of locations that matches the specified filter in the request.",
+          "items": {
+            "$ref": "Location"
+          },
+          "type": "array"
+        },
+        "nextPageToken": {
+          "description": "The standard List next-page token.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "ListOperationsResponse": {
+      "description": "The response message for Operations.ListOperations.",
+      "id": "ListOperationsResponse",
+      "properties": {
+        "nextPageToken": {
+          "description": "The standard List next-page token.",
+          "type": "string"
+        },
+        "operations": {
+          "description": "A list of operations that matches the specified filter in the request.",
+          "items": {
+            "$ref": "Operation"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "Location": {
+      "description": "A resource that represents a Google Cloud location.",
+      "id": "Location",
+      "properties": {
+        "displayName": {
+          "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".",
+          "type": "string"
+        },
+        "labels": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}",
+          "type": "object"
+        },
+        "locationId": {
+          "description": "The canonical id for this location. For example: `\"us-east1\"`.",
+          "type": "string"
+        },
+        "metadata": {
+          "additionalProperties": {
+            "description": "Properties of the object. Contains field @type with type URL.",
+            "type": "any"
+          },
+          "description": "Service-specific metadata. For example the available capacity at the given location.",
+          "type": "object"
+        },
+        "name": {
+          "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "Operation": {
+      "description": "This resource represents a long-running operation that is the result of a network API call.",
+      "id": "Operation",
+      "properties": {
+        "done": {
+          "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.",
+          "type": "boolean"
+        },
+        "error": {
+          "$ref": "Status",
+          "description": "The error result of the operation in case of failure or cancellation."
+        },
+        "metadata": {
+          "additionalProperties": {
+            "description": "Properties of the object. Contains field @type with type URL.",
+            "type": "any"
+          },
+          "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.",
+          "type": "object"
+        },
+        "name": {
+          "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.",
+          "type": "string"
+        },
+        "response": {
+          "additionalProperties": {
+            "description": "Properties of the object. Contains field @type with type URL.",
+            "type": "any"
+          },
+          "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.",
+          "type": "object"
+        }
+      },
+      "type": "object"
+    },
+    "Policy": {
+      "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).",
+      "id": "Policy",
+      "properties": {
+        "auditConfigs": {
+          "description": "Specifies cloud audit logging configuration for this policy.",
+          "items": {
+            "$ref": "AuditConfig"
+          },
+          "type": "array"
+        },
+        "bindings": {
+          "description": "Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.",
+          "items": {
+            "$ref": "Binding"
+          },
+          "type": "array"
+        },
+        "etag": {
+          "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.",
+          "format": "byte",
+          "type": "string"
+        },
+        "version": {
+          "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).",
+          "format": "int32",
+          "type": "integer"
+        }
+      },
+      "type": "object"
+    },
+    "SetIamPolicyRequest": {
+      "description": "Request message for `SetIamPolicy` method.",
+      "id": "SetIamPolicyRequest",
+      "properties": {
+        "policy": {
+          "$ref": "Policy",
+          "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them."
+        },
+        "updateMask": {
+          "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`",
+          "format": "google-fieldmask",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "Status": {
+      "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).",
+      "id": "Status",
+      "properties": {
+        "code": {
+          "description": "The status code, which should be an enum value of google.rpc.Code.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "details": {
+          "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.",
+          "items": {
+            "additionalProperties": {
+              "description": "Properties of the object. Contains field @type with type URL.",
+              "type": "any"
+            },
+            "type": "object"
+          },
+          "type": "array"
+        },
+        "message": {
+          "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "TestIamPermissionsRequest": {
+      "description": "Request message for `TestIamPermissions` method.",
+      "id": "TestIamPermissionsRequest",
+      "properties": {
+        "permissions": {
+          "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "TestIamPermissionsResponse": {
+      "description": "Response message for `TestIamPermissions` method.",
+      "id": "TestIamPermissionsResponse",
+      "properties": {
+        "permissions": {
+          "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    }
+  },
+  "servicePath": "",
+  "title": "App Hub API",
+  "version": "v1",
+  "version_module": true
+}
\ No newline at end of file
diff --git a/googleapiclient/discovery_cache/documents/apphub.v1alpha.json b/googleapiclient/discovery_cache/documents/apphub.v1alpha.json
index 88f160dfd37..11df735d92c 100644
--- a/googleapiclient/discovery_cache/documents/apphub.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/apphub.v1alpha.json
@@ -1378,7 +1378,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231208",
   "rootUrl": "https://apphub.googleapis.com/",
   "schemas": {
     "Application": {
diff --git a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json
index 5bdf6957115..c1a6f0de3b7 100644
--- a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json
@@ -586,7 +586,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://area120tables.googleapis.com/",
   "schemas": {
     "BatchCreateRowsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json
index 34719a20f92..bae597133f9 100644
--- a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json
+++ b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json
@@ -1307,7 +1307,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://authorizedbuyersmarketplace.googleapis.com/",
   "schemas": {
     "AcceptProposalRequest": {
diff --git a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json
index c475dc0a98c..7d44f817a09 100644
--- a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json
+++ b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json
@@ -328,6 +328,31 @@
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
+                "loadAuthInfo": {
+                  "description": "Load auth info for a server.",
+                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:loadAuthInfo",
+                  "httpMethod": "GET",
+                  "id": "baremetalsolution.projects.locations.instances.loadAuthInfo",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. Name of the server.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v2/{+name}:loadAuthInfo",
+                  "response": {
+                    "$ref": "LoadInstanceAuthInfoResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
                 "patch": {
                   "description": "Update details of a single server.",
                   "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}",
@@ -1613,7 +1638,7 @@
       }
     }
   },
-  "revision": "20231106",
+  "revision": "20231130",
   "rootUrl": "https://baremetalsolution.googleapis.com/",
   "schemas": {
     "AllowedClient": {
@@ -1806,6 +1831,10 @@
           "readOnly": true,
           "type": "boolean"
         },
+        "kmsKeyVersion": {
+          "description": "Optional. Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. Format is `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}/cryptoKeyVersions/{version}`.",
+          "type": "string"
+        },
         "labels": {
           "additionalProperties": {
             "type": "string"
@@ -1860,6 +1889,13 @@
           "description": "Immutable. Pod name. Pod is an independent part of infrastructure. Instance can only be connected to the assets (networks, volumes) allocated in the same pod.",
           "type": "string"
         },
+        "sshKeys": {
+          "description": "Optional. List of SSH Keys used during instance provisioning.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
         "state": {
           "description": "Output only. The state of the server.",
           "enum": [
@@ -1941,6 +1977,10 @@
           "description": "Instance type. [Available types](https://cloud.google.com/bare-metal/docs/bms-planning#server_configurations)",
           "type": "string"
         },
+        "kmsKeyVersion": {
+          "description": "Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose.",
+          "type": "string"
+        },
         "logicalInterfaces": {
           "description": "List of logical interfaces for the instance. The number of logical interfaces will be the same as number of hardware bond/nic on the chosen network template. Filled if InstanceConfig.multivlan_config is true.",
           "items": {
@@ -2274,6 +2314,27 @@
       },
       "type": "object"
     },
+    "LoadInstanceAuthInfoResponse": {
+      "description": "Response for LoadInstanceAuthInfo.",
+      "id": "LoadInstanceAuthInfoResponse",
+      "properties": {
+        "sshKeys": {
+          "description": "List of ssh keys.",
+          "items": {
+            "$ref": "SSHKey"
+          },
+          "type": "array"
+        },
+        "userAccounts": {
+          "additionalProperties": {
+            "$ref": "UserAccount"
+          },
+          "description": "Map of username to the user account info.",
+          "type": "object"
+        }
+      },
+      "type": "object"
+    },
     "Location": {
       "description": "A resource that represents a Google Cloud location.",
       "id": "Location",
@@ -2393,7 +2454,7 @@
           "type": "boolean"
         },
         "sizeGb": {
-          "description": "The size of this LUN, in gigabytes.",
+          "description": "The size of this LUN, in GiB.",
           "format": "int64",
           "type": "string"
         },
@@ -3334,6 +3395,21 @@
       },
       "type": "object"
     },
+    "UserAccount": {
+      "description": "User account provisioned for the customer.",
+      "id": "UserAccount",
+      "properties": {
+        "encryptedPassword": {
+          "description": "Encrypted initial password value.",
+          "type": "string"
+        },
+        "kmsKeyVersion": {
+          "description": "KMS CryptoKey Version used to encrypt the password.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "VRF": {
       "description": "A network VRF.",
       "id": "VRF",
diff --git a/googleapiclient/discovery_cache/documents/batch.v1.json b/googleapiclient/discovery_cache/documents/batch.v1.json
index 6f4c4a843cd..9ff23b4b5a3 100644
--- a/googleapiclient/discovery_cache/documents/batch.v1.json
+++ b/googleapiclient/discovery_cache/documents/batch.v1.json
@@ -561,7 +561,7 @@
       }
     }
   },
-  "revision": "20231106",
+  "revision": "20231122",
   "rootUrl": "https://batch.googleapis.com/",
   "schemas": {
     "Accelerator": {
@@ -1036,12 +1036,6 @@
       "properties": {},
       "type": "object"
     },
-    "CloudLoggingOption": {
-      "description": "CloudLoggingOption contains additional settings for cloud logging generated by Batch job.",
-      "id": "CloudLoggingOption",
-      "properties": {},
-      "type": "object"
-    },
     "ComputeResource": {
       "description": "Compute resource requirements. ComputeResource defines the amount of resources required for each task. Make sure your tasks have enough resources to successfully run. If you also define the types of resources for a job to use with the [InstancePolicyOrTemplate](https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#instancepolicyortemplate) field, make sure both fields are compatible with each other.",
       "id": "ComputeResource",
@@ -1079,6 +1073,10 @@
           },
           "type": "array"
         },
+        "enableImageStreaming": {
+          "description": "Optional. If set to true, container will run with Image streaming. The container runtime will be changed to containerd instead of docker. Currently, only imageUri, commands, entrypoint and volumes are supported and any other fields will be ignored. Please refer [here](https://github.com/GoogleCloudPlatform/batch-samples/tree/main/api-samples/image-streaming) for the feature requirements and limitations.",
+          "type": "boolean"
+        },
         "entrypoint": {
           "description": "Overrides the `ENTRYPOINT` specified in the container.",
           "type": "string"
@@ -1594,10 +1592,6 @@
       "description": "LogsPolicy describes how outputs from a Job's Tasks (stdout/stderr) will be preserved.",
       "id": "LogsPolicy",
       "properties": {
-        "cloudLoggingOption": {
-          "$ref": "CloudLoggingOption",
-          "description": "Optional. Additional settings for Cloud Logging. It will only take effect when the destination of LogsPolicy is set to CLOUD_LOGGING."
-        },
         "destination": {
           "description": "Where logs should be saved.",
           "enum": [
diff --git a/googleapiclient/discovery_cache/documents/beyondcorp.v1.json b/googleapiclient/discovery_cache/documents/beyondcorp.v1.json
index 87e4e3217da..18861213b1a 100644
--- a/googleapiclient/discovery_cache/documents/beyondcorp.v1.json
+++ b/googleapiclient/discovery_cache/documents/beyondcorp.v1.json
@@ -1804,7 +1804,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231129",
   "rootUrl": "https://beyondcorp.googleapis.com/",
   "schemas": {
     "AllocatedConnection": {
diff --git a/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json b/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json
index c0301d96fa4..05efd58290f 100644
--- a/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json
@@ -3716,7 +3716,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231129",
   "rootUrl": "https://beyondcorp.googleapis.com/",
   "schemas": {
     "AllocatedConnection": {
diff --git a/googleapiclient/discovery_cache/documents/biglake.v1.json b/googleapiclient/discovery_cache/documents/biglake.v1.json
index 5d9397c5101..738d7272847 100644
--- a/googleapiclient/discovery_cache/documents/biglake.v1.json
+++ b/googleapiclient/discovery_cache/documents/biglake.v1.json
@@ -616,7 +616,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231204",
   "rootUrl": "https://biglake.googleapis.com/",
   "schemas": {
     "Catalog": {
diff --git a/googleapiclient/discovery_cache/documents/bigquery.v2.json b/googleapiclient/discovery_cache/documents/bigquery.v2.json
index 13a53472193..7682dde74f0 100644
--- a/googleapiclient/discovery_cache/documents/bigquery.v2.json
+++ b/googleapiclient/discovery_cache/documents/bigquery.v2.json
@@ -1686,7 +1686,7 @@
       }
     }
   },
-  "revision": "20231021",
+  "revision": "20231202",
   "rootUrl": "https://bigquery.googleapis.com/",
   "schemas": {
     "AggregateClassificationMetrics": {
@@ -5966,16 +5966,23 @@
             "REMOTE_SERVICE_TYPE_UNSPECIFIED",
             "CLOUD_AI_TRANSLATE_V3",
             "CLOUD_AI_VISION_V1",
-            "CLOUD_AI_NATURAL_LANGUAGE_V1"
+            "CLOUD_AI_NATURAL_LANGUAGE_V1",
+            "CLOUD_AI_SPEECH_TO_TEXT_V2"
           ],
           "enumDescriptions": [
             "Unspecified remote service type.",
             "V3 Cloud AI Translation API. See more details at [Cloud Translation API] (https://cloud.google.com/translate/docs/reference/rest).",
             "V1 Cloud AI Vision API See more details at [Cloud Vision API] (https://cloud.google.com/vision/docs/reference/rest).",
-            "V1 Cloud AI Natural Language API. See more details at [REST Resource: documents](https://cloud.google.com/natural-language/docs/reference/rest/v1/documents)."
+            "V1 Cloud AI Natural Language API. See more details at [REST Resource: documents](https://cloud.google.com/natural-language/docs/reference/rest/v1/documents).",
+            "V2 Speech-to-Text API. See more details at [Google Cloud Speech-to-Text V2 API](https://cloud.google.com/speech-to-text/v2/docs)"
           ],
           "readOnly": true,
           "type": "string"
+        },
+        "speechRecognizer": {
+          "description": "Output only. The name of the speech recognizer to use for speech recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. Customers can specify this field at model creation. If not specified, a default recognizer `projects/{model project}/locations/global/recognizers/_` will be used. See more details at [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers)",
+          "readOnly": true,
+          "type": "string"
         }
       },
       "type": "object"
@@ -8273,6 +8280,36 @@
       },
       "type": "object"
     },
+    "VectorSearchStatistics": {
+      "description": "Statistics for a vector search query. Populated as part of JobStatistics2.",
+      "id": "VectorSearchStatistics",
+      "properties": {
+        "indexUnusedReasons": {
+          "description": "When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field explains why indexes were not used in all or part of the vector search query. If `indexUsageMode` is `FULLY_USED`, this field is not populated.",
+          "items": {
+            "$ref": "IndexUnusedReason"
+          },
+          "type": "array"
+        },
+        "indexUsageMode": {
+          "description": "Specifies the index usage mode for the query.",
+          "enum": [
+            "INDEX_USAGE_MODE_UNSPECIFIED",
+            "UNUSED",
+            "PARTIALLY_USED",
+            "FULLY_USED"
+          ],
+          "enumDescriptions": [
+            "Index usage mode not specified.",
+            "No vector indexes were used in the vector search query. See [`indexUnusedReasons`] (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) for detailed reasons.",
+            "Part of the vector search query used vector indexes. See [`indexUnusedReasons`] (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) for why other parts of the query did not use vector indexes.",
+            "The entire vector search query used vector indexes."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "ViewDefinition": {
       "id": "ViewDefinition",
       "properties": {
diff --git a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json
index eb0468c9b6a..0b72d152ea0 100644
--- a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json
@@ -395,7 +395,7 @@
       }
     }
   },
-  "revision": "20231021",
+  "revision": "20231203",
   "rootUrl": "https://bigqueryconnection.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json
index 99b7e1eaa75..4cb2fcac966 100644
--- a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json
+++ b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json
@@ -1342,7 +1342,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231203",
   "rootUrl": "https://bigquerydatatransfer.googleapis.com/",
   "schemas": {
     "CheckValidCredsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
index 66cd2790c48..29894976b91 100644
--- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
+++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json
@@ -831,7 +831,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231209",
   "rootUrl": "https://bigqueryreservation.googleapis.com/",
   "schemas": {
     "Assignment": {
diff --git a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json
index f088f7534fd..04d4dd69193 100644
--- a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json
+++ b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json
@@ -1966,7 +1966,7 @@
       }
     }
   },
-  "revision": "20231127",
+  "revision": "20231208",
   "rootUrl": "https://bigtableadmin.googleapis.com/",
   "schemas": {
     "AppProfile": {
diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json
index 609168d323c..a225505359a 100644
--- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json
+++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json
@@ -706,7 +706,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231208",
   "rootUrl": "https://binaryauthorization.googleapis.com/",
   "schemas": {
     "AdmissionRule": {
diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json
index 6a4cb8def24..4c6b97e8789 100644
--- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json
@@ -551,7 +551,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231208",
   "rootUrl": "https://binaryauthorization.googleapis.com/",
   "schemas": {
     "AdmissionRule": {
diff --git a/googleapiclient/discovery_cache/documents/blogger.v2.json b/googleapiclient/discovery_cache/documents/blogger.v2.json
index b68dc22072a..856eb90b8dd 100644
--- a/googleapiclient/discovery_cache/documents/blogger.v2.json
+++ b/googleapiclient/discovery_cache/documents/blogger.v2.json
@@ -401,7 +401,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://blogger.googleapis.com/",
   "schemas": {
     "Blog": {
diff --git a/googleapiclient/discovery_cache/documents/blogger.v3.json b/googleapiclient/discovery_cache/documents/blogger.v3.json
index 53fe290974d..9ca2fced29d 100644
--- a/googleapiclient/discovery_cache/documents/blogger.v3.json
+++ b/googleapiclient/discovery_cache/documents/blogger.v3.json
@@ -1710,7 +1710,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://blogger.googleapis.com/",
   "schemas": {
     "Blog": {
diff --git a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json
index 8be8a52a02b..248e3e20678 100644
--- a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json
+++ b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json
@@ -417,7 +417,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://businessprofileperformance.googleapis.com/",
   "schemas": {
     "DailyMetricTimeSeries": {
diff --git a/googleapiclient/discovery_cache/documents/calendar.v3.json b/googleapiclient/discovery_cache/documents/calendar.v3.json
index 21bdbdaa7ec..2393bde0e8a 100644
--- a/googleapiclient/discovery_cache/documents/calendar.v3.json
+++ b/googleapiclient/discovery_cache/documents/calendar.v3.json
@@ -1735,7 +1735,7 @@
       }
     }
   },
-  "revision": "20231123",
+  "revision": "20231201",
   "rootUrl": "https://www.googleapis.com/",
   "schemas": {
     "Acl": {
diff --git a/googleapiclient/discovery_cache/documents/certificatemanager.v1.json b/googleapiclient/discovery_cache/documents/certificatemanager.v1.json
index b1f49a1eaa4..d953d2e17e6 100644
--- a/googleapiclient/discovery_cache/documents/certificatemanager.v1.json
+++ b/googleapiclient/discovery_cache/documents/certificatemanager.v1.json
@@ -1280,7 +1280,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231204",
   "rootUrl": "https://certificatemanager.googleapis.com/",
   "schemas": {
     "AuthorizationAttemptInfo": {
diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json
index 2fdeb36d2e4..846b84d06fc 100644
--- a/googleapiclient/discovery_cache/documents/chat.v1.json
+++ b/googleapiclient/discovery_cache/documents/chat.v1.json
@@ -962,7 +962,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231207",
   "rootUrl": "https://chat.googleapis.com/",
   "schemas": {
     "ActionParameter": {
diff --git a/googleapiclient/discovery_cache/documents/checks.v1alpha.json b/googleapiclient/discovery_cache/documents/checks.v1alpha.json
index 69651ee1a22..05e07495c27 100644
--- a/googleapiclient/discovery_cache/documents/checks.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/checks.v1alpha.json
@@ -414,7 +414,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://checks.googleapis.com/",
   "schemas": {
     "CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json
index 4ee4d1bfb84..93ed87816f1 100644
--- a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json
@@ -1040,7 +1040,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://chromemanagement.googleapis.com/",
   "schemas": {
     "GoogleChromeManagementV1AndroidAppInfo": {
diff --git a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json
index 4b54658bafb..a41bd6cf75b 100644
--- a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json
+++ b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json
@@ -557,7 +557,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://chromepolicy.googleapis.com/",
   "schemas": {
     "GoogleChromePolicyVersionsV1AdditionalTargetKeyName": {
diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json
index da03aa53ccf..3f6318ecd5a 100644
--- a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json
@@ -121,7 +121,7 @@
           "parameterOrder": [],
           "parameters": {
             "parent": {
-              "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`",
+              "description": "Optional. The parent to create a billing account from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`",
               "location": "query",
               "type": "string"
             }
@@ -206,7 +206,7 @@
           "parameterOrder": [],
           "parameters": {
             "filter": {
-              "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.",
+              "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (for example, `master_billing_account=billingAccounts/012345-678901-ABCDEF`). Boolean algebra and other fields are not currently supported.",
               "location": "query",
               "type": "string"
             },
@@ -222,7 +222,7 @@
               "type": "string"
             },
             "parent": {
-              "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`",
+              "description": "Optional. The parent resource to list billing accounts from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`",
               "location": "query",
               "type": "string"
             }
@@ -416,7 +416,7 @@
               ],
               "parameters": {
                 "parent": {
-                  "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`",
+                  "description": "Optional. The parent to create a billing account from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`",
                   "location": "path",
                   "pattern": "^billingAccounts/[^/]+$",
                   "required": true,
@@ -445,7 +445,7 @@
               ],
               "parameters": {
                 "filter": {
-                  "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.",
+                  "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (for example, `master_billing_account=billingAccounts/012345-678901-ABCDEF`). Boolean algebra and other fields are not currently supported.",
                   "location": "query",
                   "type": "string"
                 },
@@ -461,7 +461,7 @@
                   "type": "string"
                 },
                 "parent": {
-                  "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`",
+                  "description": "Optional. The parent resource to list billing accounts from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`",
                   "location": "path",
                   "pattern": "^billingAccounts/[^/]+$",
                   "required": true,
@@ -496,7 +496,7 @@
               ],
               "parameters": {
                 "parent": {
-                  "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`",
+                  "description": "Optional. The parent to create a billing account from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`",
                   "location": "path",
                   "pattern": "^organizations/[^/]+$",
                   "required": true,
@@ -525,7 +525,7 @@
               ],
               "parameters": {
                 "filter": {
-                  "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.",
+                  "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (for example, `master_billing_account=billingAccounts/012345-678901-ABCDEF`). Boolean algebra and other fields are not currently supported.",
                   "location": "query",
                   "type": "string"
                 },
@@ -541,7 +541,7 @@
                   "type": "string"
                 },
                 "parent": {
-                  "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`",
+                  "description": "Optional. The parent resource to list billing accounts from. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`",
                   "location": "path",
                   "pattern": "^organizations/[^/]+$",
                   "required": true,
@@ -751,7 +751,7 @@
       }
     }
   },
-  "revision": "20231201",
+  "revision": "20231208",
   "rootUrl": "https://cloudbilling.googleapis.com/",
   "schemas": {
     "AggregationInfo": {
@@ -863,7 +863,7 @@
           "type": "boolean"
         },
         "parent": {
-          "description": "Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is a organization. Format: - organizations/{organization_id}, for example: organizations/12345678 - billingAccounts/{billing_account_id}, for example: `billingAccounts/012345-567890-ABCDEF`",
+          "description": "Output only. The billing account's parent resource identifier. Use the `MoveBillingAccount` method to update the account's parent resource if it is an organization. Format: - `organizations/{organization_id}`, for example, `organizations/12345678` - `billingAccounts/{billing_account_id}`, for example, `billingAccounts/012345-567890-ABCDEF`",
           "readOnly": true,
           "type": "string"
         }
diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json
index c79e311d076..3db54b2c740 100644
--- a/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json
@@ -463,6 +463,53 @@
                   ]
                 }
               }
+            },
+            "prices": {
+              "methods": {
+                "list": {
+                  "description": "Lists the latest prices for SKUs available to your Cloud Billing account.",
+                  "flatPath": "v1beta/billingAccounts/{billingAccountsId}/skus/{skusId}/prices",
+                  "httpMethod": "GET",
+                  "id": "cloudbilling.billingAccounts.skus.prices.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "currencyCode": {
+                      "description": "Optional. ISO-4217 currency code for the price. If not specified, currency of billing account will be used.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "pageSize": {
+                      "description": "Optional. Maximum number of billing account price to return. Results may return fewer than this value. Default value is 50 and maximum value is 5000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "Optional. Page token received from a previous ListBillingAccountPrices call to retrieve the next page of results. If this field is empty, the first page is returned.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. To list all Billing Account SKUs, use `-` as the SKU ID. Format: `billingAccounts/{billing_account}/skus/-` Note: Specifying an actual SKU resource id will return a collection of one Billing Account Price.",
+                      "location": "path",
+                      "pattern": "^billingAccounts/[^/]+/skus/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1beta/{+parent}/prices",
+                  "response": {
+                    "$ref": "GoogleCloudBillingBillingaccountpricesV1betaListBillingAccountPricesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-billing",
+                    "https://www.googleapis.com/auth/cloud-billing.readonly",
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
             }
           }
         }
@@ -636,6 +683,53 @@
               ]
             }
           }
+        },
+        "prices": {
+          "methods": {
+            "list": {
+              "description": "Lists the latest prices for all SKUs.",
+              "flatPath": "v1beta/skus/{skusId}/prices",
+              "httpMethod": "GET",
+              "id": "cloudbilling.skus.prices.list",
+              "parameterOrder": [
+                "parent"
+              ],
+              "parameters": {
+                "currencyCode": {
+                  "description": "Optional. ISO-4217 currency code for the price. If not specified, USD will be used.",
+                  "location": "query",
+                  "type": "string"
+                },
+                "pageSize": {
+                  "description": "Optional. Maximum number of prices to return. Results may return fewer than this value. Default value is 50 and maximum value is 5000.",
+                  "format": "int32",
+                  "location": "query",
+                  "type": "integer"
+                },
+                "pageToken": {
+                  "description": "Optional. Page token received from a previous ListPrices call to retrieve the next page of results. If this field is empty, the first page is returned.",
+                  "location": "query",
+                  "type": "string"
+                },
+                "parent": {
+                  "description": "Required. To list the prices for all SKUs, use `-` as the SKU ID. Format: `skus/-` Specifying a specific SKU ID returns a collection with one Price object for the SKU.",
+                  "location": "path",
+                  "pattern": "^skus/[^/]+$",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1beta/{+parent}/prices",
+              "response": {
+                "$ref": "GoogleCloudBillingPricesV1betaListPricesResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-billing",
+                "https://www.googleapis.com/auth/cloud-billing.readonly",
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          }
         }
       }
     },
@@ -664,7 +758,7 @@
       }
     }
   },
-  "revision": "20231201",
+  "revision": "20231208",
   "rootUrl": "https://cloudbilling.googleapis.com/",
   "schemas": {
     "CacheFillRegions": {
@@ -719,11 +813,11 @@
       "type": "object"
     },
     "CloudCdnEgressWorkload": {
-      "description": "Specifies usage for Cloud CDN egress.",
+      "description": "Specifies usage for Cloud CDN Data Transfer.",
       "id": "CloudCdnEgressWorkload",
       "properties": {
         "cacheEgressDestination": {
-          "description": "The destination for the cache egress charges.",
+          "description": "The destination for the cache data transfer.",
           "enum": [
             "CACHE_EGRESS_DESTINATION_UNSPECIFIED",
             "CACHE_EGRESS_DESTINATION_ASIA_PACIFIC",
@@ -748,7 +842,7 @@
         },
         "cacheEgressRate": {
           "$ref": "Usage",
-          "description": "Cache egress usage. The rate of data cache egressed in the destination. For example : units such as \"GiBy/s\" or \"TBy/mo\"."
+          "description": "Cache data transfer usage. The rate of data cache transferred to the destination. Use units such as GiB/s or TiB/mo."
         }
       },
       "type": "object"
@@ -787,15 +881,15 @@
       "type": "object"
     },
     "CloudInterconnectEgressWorkload": {
-      "description": "The interconnect egress only includes the Interconnect Egress. Please use the standard egress traffic interface to specify your standard egress usage.",
+      "description": "Includes the estimate for Interconnect Data Transfer only. To specify usage for data transfer between VMs and internet end-points, use the Standard Tier Internet Data Transfer interface.",
       "id": "CloudInterconnectEgressWorkload",
       "properties": {
         "egressRate": {
           "$ref": "Usage",
-          "description": "Data egress usage. This usage applies when you move or copy data from one Google Cloud service to another service. Expected units such as \"GiBy/s, By/s, etc.\""
+          "description": "Outbound data transfer usage. This usage applies when you move or copy data from one Google Cloud service to another service. The units are \"GiB/s, B/s, and so on.\""
         },
         "interconnectConnectionLocation": {
-          "description": "Locations in the [Interconnect connection location table](https://cloud.google.com/vpc/network-pricing#interconnect-pricing). This is the interconnect egress charges.",
+          "description": "Locations in the [Interconnect connection location table](https://cloud.google.com/vpc/network-pricing#interconnect-pricing). These are the Interconnect Data Transfer charges.",
           "enum": [
             "INTERCONNECT_CONNECTION_LOCATION_UNSPECIFIED",
             "INTERCONNECT_CONNECTION_LOCATION_ASIA",
@@ -864,7 +958,7 @@
       "type": "object"
     },
     "CloudStorageEgressWorkload": {
-      "description": "Specification of a network type. Network egress within Google Cloud applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket.This includes the network egress within Google Cloud and the general network usage.",
+      "description": "Specification of a network type. Network data transfer within Google Cloud applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket.This includes the network data transfer within Google Cloud and the general network usage.",
       "id": "CloudStorageEgressWorkload",
       "properties": {
         "destinationContinent": {
@@ -889,7 +983,7 @@
         },
         "egressRate": {
           "$ref": "Usage",
-          "description": "Egress usage rate. This usage applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket. Expected units such as \"GiBy/s, By/s, ...\""
+          "description": "Data transfer usage rate. This usage applies when you move or copy data from one Cloud Storage bucket to another or when another Google Cloud service accesses data in your Cloud Storage bucket. Expected units such as \"GiB/s, B/s, ...\""
         },
         "sourceContinent": {
           "description": "Where the data comes from.",
@@ -1342,6 +1436,24 @@
       },
       "type": "object"
     },
+    "GoogleCloudBillingBillingaccountpricesV1betaListBillingAccountPricesResponse": {
+      "description": "Response message for ListBillingAccountPrices.",
+      "id": "GoogleCloudBillingBillingaccountpricesV1betaListBillingAccountPricesResponse",
+      "properties": {
+        "billingAccountPrices": {
+          "description": "The returned billing account prices.",
+          "items": {
+            "$ref": "GoogleCloudBillingBillingaccountpricesV1betaBillingAccountPrice"
+          },
+          "type": "array"
+        },
+        "nextPageToken": {
+          "description": "Token that can be sent as `page_token` in the subsequent request to retrieve the next page. If this field is empty, there are no subsequent pages.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "GoogleCloudBillingBillingaccountpricesV1betaListPriceAsCeiling": {
       "description": "Encapsulates a contract feature that the list price (DefaultPrice) will be used for the price if the current list price drops lower than the custom fixed price. Available to new contracts after March 21, 2022. Applies to all fixed price SKUs in the contract, including FixedPrice, FixedDiscount, MigratedPrice, and MergedPrice.",
       "id": "GoogleCloudBillingBillingaccountpricesV1betaListPriceAsCeiling",
@@ -1875,6 +1987,24 @@
       },
       "type": "object"
     },
+    "GoogleCloudBillingPricesV1betaListPricesResponse": {
+      "description": "Response message for ListPrices.",
+      "id": "GoogleCloudBillingPricesV1betaListPricesResponse",
+      "properties": {
+        "nextPageToken": {
+          "description": "Token that can be sent as `page_token` in the subsequent request to retrieve the next page. If this field is empty, there are no subsequent pages.",
+          "type": "string"
+        },
+        "prices": {
+          "description": "The returned publicly listed prices.",
+          "items": {
+            "$ref": "GoogleCloudBillingPricesV1betaPrice"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
     "GoogleCloudBillingPricesV1betaPrice": {
       "description": "Encapsulates the latest price for a SKU.",
       "id": "GoogleCloudBillingPricesV1betaPrice",
@@ -2155,31 +2285,31 @@
       "type": "object"
     },
     "InterRegionEgress": {
-      "description": "Egress traffic between two regions.",
+      "description": "Data transfer between two regions.",
       "id": "InterRegionEgress",
       "properties": {
         "destinationRegion": {
-          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data goes to.",
+          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred to.",
           "type": "string"
         },
         "egressRate": {
           "$ref": "Usage",
-          "description": "VM to VM egress usage. Expected units such as \"GiBy/s, By/s, etc.\""
+          "description": "VM to VM data transfer usage. The expected units are GiB/s, B/s, and so on."
         },
         "sourceRegion": {
-          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from.",
+          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred from.",
           "type": "string"
         }
       },
       "type": "object"
     },
     "IntraRegionEgress": {
-      "description": "Egress traffic within the same region. When source region and destination region are in the same zone, using the internal IP addresses, there isn't any egress charge.",
+      "description": "Data transfer within the same region. When the source region and destination region are in the same zone, using internal IP addresses, there isn't any charge for data transfer.",
       "id": "IntraRegionEgress",
       "properties": {
         "egressRate": {
           "$ref": "Usage",
-          "description": "VM to VM egress usage. Expected units such as \"GiBy/s, By/s, etc.\""
+          "description": "VM to VM data transfer usage. The expected units are GiB/s, B/s, and so on."
         }
       },
       "type": "object"
@@ -2274,7 +2404,7 @@
       "type": "object"
     },
     "PremiumTierEgressWorkload": {
-      "description": "Specify Premium Tier Internet egress networking.",
+      "description": "Specify a Premium Tier Internet Data Transfer networking workload.",
       "id": "PremiumTierEgressWorkload",
       "properties": {
         "destinationContinent": {
@@ -2313,10 +2443,10 @@
         },
         "egressRate": {
           "$ref": "Usage",
-          "description": "Premium Tier egress usage. Expected units such as \"GiBy/s, By/s, etc.\""
+          "description": "Premium Tier Data Transfer usage. The expected units are GiB/s, B/s, and so on."
         },
         "sourceRegion": {
-          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from.",
+          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the data comes from.",
           "type": "string"
         }
       },
@@ -2479,15 +2609,15 @@
       "type": "object"
     },
     "StandardTierEgressWorkload": {
-      "description": "Specify Standard Tier Internet egress networking.",
+      "description": "Specify Standard Tier Internet Data Transfer.",
       "id": "StandardTierEgressWorkload",
       "properties": {
         "egressRate": {
           "$ref": "Usage",
-          "description": "Standard tier egress usage. Expected units such as \"GiBy/s, By/s, etc.\""
+          "description": "Standard Tier Data Transfer usage. The expected units are GiB/s, B/s, and so on."
         },
         "sourceRegion": {
-          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the egress data comes from.",
+          "description": "Which [region](https://cloud.google.com/compute/docs/regions-zones) the data is transferred from.",
           "type": "string"
         }
       },
@@ -2627,7 +2757,7 @@
       "type": "object"
     },
     "VmToVmEgressWorkload": {
-      "description": "Specify VM to VM egress.",
+      "description": "Specify VM to VM data transfer.",
       "id": "VmToVmEgressWorkload",
       "properties": {
         "interRegionEgress": {
@@ -2645,7 +2775,7 @@
       "properties": {
         "cloudCdnEgressWorkload": {
           "$ref": "CloudCdnEgressWorkload",
-          "description": "Usage on Google Cloud CDN Egress."
+          "description": "Usage on Google Cloud CDN Data Transfer."
         },
         "cloudCdnWorkload": {
           "$ref": "CloudCdnWorkload",
@@ -2653,7 +2783,7 @@
         },
         "cloudInterconnectEgressWorkload": {
           "$ref": "CloudInterconnectEgressWorkload",
-          "description": "Usage on Google Cloud Interconnect Egress."
+          "description": "Usage on Google Cloud Interconnect Data Transfer."
         },
         "cloudInterconnectWorkload": {
           "$ref": "CloudInterconnectWorkload",
@@ -2661,7 +2791,7 @@
         },
         "cloudStorageEgressWorkload": {
           "$ref": "CloudStorageEgressWorkload",
-          "description": "Usage on a cloud storage egress."
+          "description": "Usage on Cloud Storage Data Transfer."
         },
         "cloudStorageWorkload": {
           "$ref": "CloudStorageWorkload",
@@ -2677,15 +2807,15 @@
         },
         "premiumTierEgressWorkload": {
           "$ref": "PremiumTierEgressWorkload",
-          "description": "Usage on Premium Tier Internet Egress."
+          "description": "Usage on Premium Tier Internet Data Transfer."
         },
         "standardTierEgressWorkload": {
           "$ref": "StandardTierEgressWorkload",
-          "description": "Usage on Standard Tier Internet Egress."
+          "description": "Usage on Standard Tier Internet Data Transfer."
         },
         "vmToVmEgressWorkload": {
           "$ref": "VmToVmEgressWorkload",
-          "description": "Usage on Vm to Vm Egress."
+          "description": "Usage on VM to VM Data Transfer."
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json
index 01079221045..e3bcf956201 100644
--- a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json
@@ -2319,7 +2319,7 @@
       }
     }
   },
-  "revision": "20231127",
+  "revision": "20231203",
   "rootUrl": "https://cloudbuild.googleapis.com/",
   "schemas": {
     "ApprovalConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json
index 94dd9be1b25..641e274c9dc 100644
--- a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json
@@ -844,7 +844,7 @@
       }
     }
   },
-  "revision": "20231127",
+  "revision": "20231203",
   "rootUrl": "https://cloudbuild.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json
index 0d4f9677af6..3d043da997f 100644
--- a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json
@@ -2178,7 +2178,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://cloudchannel.googleapis.com/",
   "schemas": {
     "GoogleCloudChannelV1ActivateEntitlementRequest": {
diff --git a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json
index 6908d6399ca..7d6391f1ef0 100644
--- a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json
+++ b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json
@@ -202,6 +202,218 @@
             }
           },
           "resources": {
+            "customTargetTypes": {
+              "methods": {
+                "create": {
+                  "description": "Creates a new CustomTargetType in a given project and location.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/customTargetTypes",
+                  "httpMethod": "POST",
+                  "id": "clouddeploy.projects.locations.customTargetTypes.create",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "customTargetTypeId": {
+                      "description": "Required. ID of the `CustomTargetType`.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. The parent collection in which the `CustomTargetType` should be created in. Format should be `projects/{project_id}/locations/{location_name}`.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "requestId": {
+                      "description": "Optional. A request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "validateOnly": {
+                      "description": "Optional. If set to true, the request is validated and the user is provided with an expected result, but no actual change is made.",
+                      "location": "query",
+                      "type": "boolean"
+                    }
+                  },
+                  "path": "v1/{+parent}/customTargetTypes",
+                  "request": {
+                    "$ref": "CustomTargetType"
+                  },
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "delete": {
+                  "description": "Deletes a single CustomTargetType.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/customTargetTypes/{customTargetTypesId}",
+                  "httpMethod": "DELETE",
+                  "id": "clouddeploy.projects.locations.customTargetTypes.delete",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "allowMissing": {
+                      "description": "Optional. If set to true, then deleting an already deleted or non-existing `CustomTargetType` will succeed.",
+                      "location": "query",
+                      "type": "boolean"
+                    },
+                    "etag": {
+                      "description": "Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "name": {
+                      "description": "Required. The name of the `CustomTargetType` to delete. Format must be `projects/{project_id}/locations/{location_name}/customTargetTypes/{custom_target_type}`.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/customTargetTypes/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "requestId": {
+                      "description": "Optional. A request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "validateOnly": {
+                      "description": "Optional. If set to true, the request is validated but no actual change is made.",
+                      "location": "query",
+                      "type": "boolean"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "get": {
+                  "description": "Gets details of a single CustomTargetType.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/customTargetTypes/{customTargetTypesId}",
+                  "httpMethod": "GET",
+                  "id": "clouddeploy.projects.locations.customTargetTypes.get",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. Name of the `CustomTargetType`. Format must be `projects/{project_id}/locations/{location_name}/customTargetTypes/{custom_target_type}`.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/customTargetTypes/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "CustomTargetType"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "list": {
+                  "description": "Lists CustomTargetTypes in a given project and location.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/customTargetTypes",
+                  "httpMethod": "GET",
+                  "id": "clouddeploy.projects.locations.customTargetTypes.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "filter": {
+                      "description": "Optional. Filter custom target types to be returned. See https://google.aip.dev/160 for more details.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "orderBy": {
+                      "description": "Optional. Field to sort by. See https://google.aip.dev/132#ordering for more details.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "pageSize": {
+                      "description": "Optional. The maximum number of `CustomTargetType` objects to return. The service may return fewer than this value. If unspecified, at most 50 `CustomTargetType` objects will be returned. The maximum value is 1000; values above 1000 will be set to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "Optional. A page token, received from a previous `ListCustomTargetTypes` call. Provide this to retrieve the subsequent page. When paginating, all other provided parameters match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. The parent that owns this collection of custom target types. Format must be `projects/{project_id}/locations/{location_name}`.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customTargetTypes",
+                  "response": {
+                    "$ref": "ListCustomTargetTypesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "patch": {
+                  "description": "Updates a single CustomTargetType.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/customTargetTypes/{customTargetTypesId}",
+                  "httpMethod": "PATCH",
+                  "id": "clouddeploy.projects.locations.customTargetTypes.patch",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "allowMissing": {
+                      "description": "Optional. If set to true, updating a `CustomTargetType` that does not exist will result in the creation of a new `CustomTargetType`.",
+                      "location": "query",
+                      "type": "boolean"
+                    },
+                    "name": {
+                      "description": "Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/customTargetTypes/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "requestId": {
+                      "description": "Optional. A request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "updateMask": {
+                      "description": "Required. Field mask is used to specify the fields to be overwritten in the `CustomTargetType` resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then all fields will be overwritten.",
+                      "format": "google-fieldmask",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "validateOnly": {
+                      "description": "Optional. If set to true, the request is validated and the user is provided with an expected result, but no actual change is made.",
+                      "location": "query",
+                      "type": "boolean"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "request": {
+                    "$ref": "CustomTargetType"
+                  },
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
+            },
             "deliveryPipelines": {
               "methods": {
                 "create": {
@@ -620,7 +832,7 @@
                           "type": "string"
                         },
                         "parent": {
-                          "description": "Required. The parent, which owns this collection of automationRuns. Format must be `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}`.",
+                          "description": "Required. The parent `Delivery Pipeline`, which owns this collection of automationRuns. Format must be `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}`.",
                           "location": "path",
                           "pattern": "^projects/[^/]+/locations/[^/]+/deliveryPipelines/[^/]+$",
                           "required": true,
@@ -783,7 +995,7 @@
                           "type": "string"
                         },
                         "parent": {
-                          "description": "Required. The parent, which owns this collection of automations. Format must be `projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}`.",
+                          "description": "Required. The parent `Delivery Pipeline`, which owns this collection of automations. Format must be `projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}`.",
                           "location": "path",
                           "pattern": "^projects/[^/]+/locations/[^/]+/deliveryPipelines/[^/]+$",
                           "required": true,
@@ -1794,7 +2006,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231130",
   "rootUrl": "https://clouddeploy.googleapis.com/",
   "schemas": {
     "AbandonReleaseRequest": {
@@ -1837,7 +2049,7 @@
       "id": "AdvanceRolloutOperation",
       "properties": {
         "destinationPhase": {
-          "description": "Output only. The phase to which the rollout will be advanced to.",
+          "description": "Output only. The phase the rollout will be advanced to.",
           "readOnly": true,
           "type": "string"
         },
@@ -1982,7 +2194,7 @@
       "type": "object"
     },
     "Automation": {
-      "description": "An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process.",
+      "description": "An `Automation` resource in the Cloud Deploy API. An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion among Targets, Rollout repair and Rollout deployment strategy advancement. The intention of Automation is to reduce manual intervention in the continuous delivery process.",
       "id": "Automation",
       "properties": {
         "annotations": {
@@ -2177,7 +2389,7 @@
       "type": "object"
     },
     "AutomationRun": {
-      "description": "An `AutomationRun` resource in the Cloud Deploy API. An `AutomationRun` represents an automation execution instance of an automation rule.",
+      "description": "An `AutomationRun` resource in the Cloud Deploy API. An `AutomationRun` represents an execution instance of an automation rule.",
       "id": "AutomationRun",
       "properties": {
         "advanceRolloutOperation": {
@@ -2207,7 +2419,7 @@
           "type": "string"
         },
         "expireTime": {
-          "description": "Output only. Time the `AutomationRun` will expire. An `AutomationRun` will expire after 14 days from its creation date.",
+          "description": "Output only. Time the `AutomationRun` expires. An `AutomationRun` expires after 14 days from its creation date.",
           "format": "google-datetime",
           "readOnly": true,
           "type": "string"
@@ -2259,7 +2471,7 @@
           "type": "string"
         },
         "stateDescription": {
-          "description": "Output only. Explains the current state of the `AutomationRun`. Present only an explanation is needed.",
+          "description": "Output only. Explains the current state of the `AutomationRun`. Present only when an explanation is needed.",
           "readOnly": true,
           "type": "string"
         },
@@ -2491,6 +2703,27 @@
         "automaticTrafficControl": {
           "description": "Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments.",
           "type": "boolean"
+        },
+        "canaryRevisionTags": {
+          "description": "Optional. A list of tags that are added to the canary revision while the canary deployment is in progress.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "priorRevisionTags": {
+          "description": "Optional. A list of tags that are added to the prior revision while the canary deployment is in progress.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "stableRevisionTags": {
+          "description": "Optional. A list of tags that are added to the final stable revision after the canary deployment is completed.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
         }
       },
       "type": "object"
@@ -2607,6 +2840,125 @@
       },
       "type": "object"
     },
+    "CustomMetadata": {
+      "description": "CustomMetadata contains information from a user defined operation.",
+      "id": "CustomMetadata",
+      "properties": {
+        "values": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "Output only. Key-value pairs provided by the user defined operation.",
+          "readOnly": true,
+          "type": "object"
+        }
+      },
+      "type": "object"
+    },
+    "CustomTarget": {
+      "description": "Information specifying a Custom Target.",
+      "id": "CustomTarget",
+      "properties": {
+        "customTargetType": {
+          "description": "Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CustomTargetDeployMetadata": {
+      "description": "CustomTargetDeployMetadata contains information from a Custom Target deploy operation.",
+      "id": "CustomTargetDeployMetadata",
+      "properties": {
+        "skipMessage": {
+          "description": "Output only. Skip message provided in the results of a custom deploy operation.",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CustomTargetSkaffoldActions": {
+      "description": "CustomTargetSkaffoldActions represents the `CustomTargetType` configuration using Skaffold custom actions.",
+      "id": "CustomTargetSkaffoldActions",
+      "properties": {
+        "deployAction": {
+          "description": "Required. The Skaffold custom action responsible for deploy operations.",
+          "type": "string"
+        },
+        "includeSkaffoldModules": {
+          "description": "Optional. List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose.",
+          "items": {
+            "$ref": "SkaffoldModules"
+          },
+          "type": "array"
+        },
+        "renderAction": {
+          "description": "Optional. The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CustomTargetType": {
+      "description": "A `CustomTargetType` resource in the Cloud Deploy API. A `CustomTargetType` defines a type of custom target that can be referenced in a `Target` in order to facilitate deploying to a runtime that does not have a 1P integration with Cloud Deploy.",
+      "id": "CustomTargetType",
+      "properties": {
+        "annotations": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "Optional. User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.",
+          "type": "object"
+        },
+        "createTime": {
+          "description": "Output only. Time at which the `CustomTargetType` was created.",
+          "format": "google-datetime",
+          "readOnly": true,
+          "type": "string"
+        },
+        "customActions": {
+          "$ref": "CustomTargetSkaffoldActions",
+          "description": "Configures render and deploy for the `CustomTargetType` using Skaffold custom actions."
+        },
+        "customTargetTypeId": {
+          "description": "Output only. Resource id of the `CustomTargetType`.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "description": {
+          "description": "Optional. Description of the `CustomTargetType`. Max length is 255 characters.",
+          "type": "string"
+        },
+        "etag": {
+          "description": "Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.",
+          "type": "string"
+        },
+        "labels": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.",
+          "type": "object"
+        },
+        "name": {
+          "description": "Optional. Name of the `CustomTargetType`. Format is `projects/{project}/locations/{location}/customTargetTypes/a-z{0,62}`.",
+          "type": "string"
+        },
+        "uid": {
+          "description": "Output only. Unique identifier of the `CustomTargetType`.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "updateTime": {
+          "description": "Output only. Most recent time at which the `CustomTargetType` was updated.",
+          "format": "google-datetime",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "Date": {
       "description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp",
       "id": "Date",
@@ -2804,7 +3156,8 @@
             "EXECUTION_FAILED",
             "DEADLINE_EXCEEDED",
             "MISSING_RESOURCES_FOR_CANARY",
-            "CLOUD_BUILD_REQUEST_FAILED"
+            "CLOUD_BUILD_REQUEST_FAILED",
+            "DEPLOY_FEATURE_NOT_SUPPORTED"
           ],
           "enumDescriptions": [
             "No reason for failure is specified.",
@@ -2812,7 +3165,8 @@
             "The deploy operation did not complete successfully; check Cloud Build logs.",
             "The deploy job run did not complete within the alloted time.",
             "There were missing resources in the runtime environment required for a canary deployment. Check the Cloud Build logs for more information.",
-            "Cloud Build failed to fulfill Cloud Deploy's request. See failure_message for additional details."
+            "Cloud Build failed to fulfill Cloud Deploy's request. See failure_message for additional details.",
+            "The deploy operation had a feature configured that is not supported."
           ],
           "readOnly": true,
           "type": "string"
@@ -2838,6 +3192,16 @@
           "$ref": "CloudRunMetadata",
           "description": "Output only. The name of the Cloud Run Service that is associated with a `DeployJobRun`.",
           "readOnly": true
+        },
+        "custom": {
+          "$ref": "CustomMetadata",
+          "description": "Output only. Custom metadata provided by user defined deploy operation.",
+          "readOnly": true
+        },
+        "customTarget": {
+          "$ref": "CustomTargetDeployMetadata",
+          "description": "Output only. Custom Target metadata associated with a `DeployJobRun`.",
+          "readOnly": true
         }
       },
       "type": "object"
@@ -3323,7 +3687,7 @@
       "id": "ListAutomationsResponse",
       "properties": {
         "automations": {
-          "description": "The `Automations` objects.",
+          "description": "The `Automation` objects.",
           "items": {
             "$ref": "Automation"
           },
@@ -3343,6 +3707,31 @@
       },
       "type": "object"
     },
+    "ListCustomTargetTypesResponse": {
+      "description": "The response object from `ListCustomTargetTypes.`",
+      "id": "ListCustomTargetTypesResponse",
+      "properties": {
+        "customTargetTypes": {
+          "description": "The `CustomTargetType` objects.",
+          "items": {
+            "$ref": "CustomTargetType"
+          },
+          "type": "array"
+        },
+        "nextPageToken": {
+          "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.",
+          "type": "string"
+        },
+        "unreachable": {
+          "description": "Locations that could not be reached.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
     "ListDeliveryPipelinesResponse": {
       "description": "The response object from `ListDeliveryPipelines`.",
       "id": "ListDeliveryPipelinesResponse",
@@ -3551,6 +3940,11 @@
           "$ref": "CloudRunMetadata",
           "description": "Output only. The name of the Cloud Run Service that is associated with a `Rollout`.",
           "readOnly": true
+        },
+        "custom": {
+          "$ref": "CustomMetadata",
+          "description": "Output only. Custom metadata provided by user defined `Rollout` operations.",
+          "readOnly": true
         }
       },
       "type": "object"
@@ -4061,6 +4455,14 @@
           "readOnly": true,
           "type": "string"
         },
+        "customTargetTypeSnapshots": {
+          "description": "Output only. Snapshot of the custom target types referenced by the targets taken at release creation time.",
+          "items": {
+            "$ref": "CustomTargetType"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
         "deliveryPipelineSnapshot": {
           "$ref": "DeliveryPipeline",
           "description": "Output only. Snapshot of the parent pipeline taken at release creation time.",
@@ -4175,7 +4577,7 @@
         },
         "skaffoldSupportedCondition": {
           "$ref": "SkaffoldSupportedCondition",
-          "description": "Details around the support state of the release's skaffold version."
+          "description": "Details around the support state of the release's Skaffold version."
         }
       },
       "type": "object"
@@ -4279,6 +4681,11 @@
           "$ref": "CloudRunRenderMetadata",
           "description": "Output only. Metadata associated with rendering for Cloud Run.",
           "readOnly": true
+        },
+        "custom": {
+          "$ref": "CustomMetadata",
+          "description": "Output only. Custom metadata provided by user defined render operation.",
+          "readOnly": true
         }
       },
       "type": "object"
@@ -4325,6 +4732,16 @@
           "readOnly": true,
           "type": "string"
         },
+        "jobId": {
+          "description": "Output only. The job ID for the Job to repair.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "phaseId": {
+          "description": "Output only. The phase ID of the phase that includes the job being repaired.",
+          "readOnly": true,
+          "type": "string"
+        },
         "repairPhases": {
           "description": "Output only. Records of the repair attempts. Each repair phase may have multiple retry attempts or single rollback attempt.",
           "items": {
@@ -4383,7 +4800,7 @@
       "id": "Retry",
       "properties": {
         "attempts": {
-          "description": "Required. Total number of retries. Retry will skipped if set to 0; The minimum value is 1, and the maximum value is 10.",
+          "description": "Required. Total number of retries. Retry is skipped if set to 0. The minimum value is 1, and the maximum value is 10.",
           "format": "int64",
           "type": "string"
         },
@@ -4505,11 +4922,13 @@
           "type": "string"
         },
         "jobId": {
+          "deprecated": true,
           "description": "Output only. The job ID for the Job to retry.",
           "readOnly": true,
           "type": "string"
         },
         "phaseId": {
+          "deprecated": true,
           "description": "Output only. The phase ID of the phase that includes the job being retried.",
           "readOnly": true,
           "type": "string"
@@ -4699,7 +5118,8 @@
             "RELEASE_FAILED",
             "RELEASE_ABANDONED",
             "VERIFICATION_CONFIG_NOT_FOUND",
-            "CLOUD_BUILD_REQUEST_FAILED"
+            "CLOUD_BUILD_REQUEST_FAILED",
+            "OPERATION_FEATURE_NOT_SUPPORTED"
           ],
           "enumDescriptions": [
             "No reason for failure is specified.",
@@ -4708,8 +5128,9 @@
             "Deployment did not complete within the alloted time.",
             "Release is in a failed state.",
             "Release is abandoned.",
-            "No skaffold verify configuration was found.",
-            "Cloud Build failed to fulfill Cloud Deploy's request. See failure_message for additional details."
+            "No Skaffold verify configuration was found.",
+            "Cloud Build failed to fulfill Cloud Deploy's request. See failure_message for additional details.",
+            "A Rollout operation had a feature configured that is not supported."
           ],
           "readOnly": true,
           "type": "string"
@@ -5046,17 +5467,73 @@
       },
       "type": "object"
     },
+    "SkaffoldGCSSource": {
+      "description": "Cloud Storage bucket containing Skaffold Config modules.",
+      "id": "SkaffoldGCSSource",
+      "properties": {
+        "path": {
+          "description": "Optional. Relative path from the source to the Skaffold file.",
+          "type": "string"
+        },
+        "source": {
+          "description": "Required. Cloud Storage source paths to copy recursively. For example, providing \"gs://my-bucket/dir/configs/*\" will result in Skaffold copying all files within the \"dir/configs\" directory in the bucket \"my-bucket\".",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "SkaffoldGitSource": {
+      "description": "Git repository containing Skaffold Config modules.",
+      "id": "SkaffoldGitSource",
+      "properties": {
+        "path": {
+          "description": "Optional. Relative path from the repository root to the Skaffold file.",
+          "type": "string"
+        },
+        "ref": {
+          "description": "Optional. Git ref the package should be cloned from.",
+          "type": "string"
+        },
+        "repo": {
+          "description": "Required. Git repository the package should be cloned from.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "SkaffoldModules": {
+      "description": "Skaffold Config modules and their remote source.",
+      "id": "SkaffoldModules",
+      "properties": {
+        "configs": {
+          "description": "Optional. The Skaffold Config modules to use from the specified source.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "git": {
+          "$ref": "SkaffoldGitSource",
+          "description": "Remote git repository containing the Skaffold Config modules."
+        },
+        "googleCloudStorage": {
+          "$ref": "SkaffoldGCSSource",
+          "description": "Cloud Storage bucket containing the Skaffold Config modules."
+        }
+      },
+      "type": "object"
+    },
     "SkaffoldSupportedCondition": {
-      "description": "SkaffoldSupportedCondition contains information about when support for the release's version of skaffold ends.",
+      "description": "SkaffoldSupportedCondition contains information about when support for the release's version of Skaffold ends.",
       "id": "SkaffoldSupportedCondition",
       "properties": {
         "maintenanceModeTime": {
-          "description": "The time at which this release's version of skaffold will enter maintenance mode.",
+          "description": "The time at which this release's version of Skaffold will enter maintenance mode.",
           "format": "google-datetime",
           "type": "string"
         },
         "skaffoldSupportState": {
-          "description": "The skaffold support state for this release's version of skaffold.",
+          "description": "The Skaffold support state for this release's version of Skaffold.",
           "enum": [
             "SKAFFOLD_SUPPORT_STATE_UNSPECIFIED",
             "SKAFFOLD_SUPPORT_STATE_SUPPORTED",
@@ -5065,18 +5542,18 @@
           ],
           "enumDescriptions": [
             "Default value. This value is unused.",
-            "This skaffold version is currently supported.",
-            "This skaffold version is in maintenance mode.",
-            "This skaffold version is no longer supported."
+            "This Skaffold version is currently supported.",
+            "This Skaffold version is in maintenance mode.",
+            "This Skaffold version is no longer supported."
           ],
           "type": "string"
         },
         "status": {
-          "description": "True if the version of skaffold used by this release is supported.",
+          "description": "True if the version of Skaffold used by this release is supported.",
           "type": "boolean"
         },
         "supportExpirationTime": {
-          "description": "The time at which this release's version of skaffold will no longer be supported.",
+          "description": "The time at which this release's version of Skaffold will no longer be supported.",
           "format": "google-datetime",
           "type": "string"
         }
@@ -5088,7 +5565,7 @@
       "id": "SkaffoldVersion",
       "properties": {
         "maintenanceModeTime": {
-          "description": "The time at which this version of skaffold will enter maintenance mode.",
+          "description": "The time at which this version of Skaffold will enter maintenance mode.",
           "format": "google-datetime",
           "type": "string"
         },
@@ -5097,7 +5574,7 @@
           "description": "Date when this version is expected to no longer be supported."
         },
         "supportExpirationTime": {
-          "description": "The time at which this version of skaffold will no longer be supported.",
+          "description": "The time at which this version of Skaffold will no longer be supported.",
           "format": "google-datetime",
           "type": "string"
         },
@@ -5219,6 +5696,10 @@
           "readOnly": true,
           "type": "string"
         },
+        "customTarget": {
+          "$ref": "CustomTarget",
+          "description": "Optional. Information specifying a Custom Target."
+        },
         "deployParameters": {
           "additionalProperties": {
             "type": "string"
@@ -5397,16 +5878,18 @@
             "CLOUD_BUILD_REQUEST_FAILED",
             "VERIFICATION_CONFIG_NOT_FOUND",
             "CUSTOM_ACTION_NOT_FOUND",
-            "DEPLOYMENT_STRATEGY_NOT_SUPPORTED"
+            "DEPLOYMENT_STRATEGY_NOT_SUPPORTED",
+            "RENDER_FEATURE_NOT_SUPPORTED"
           ],
           "enumDescriptions": [
             "No reason for failure is specified.",
             "Cloud Build is not available, either because it is not enabled or because Cloud Deploy has insufficient permissions. See [required permission](https://cloud.google.com/deploy/docs/cloud-deploy-service-account#required_permissions).",
             "The render operation did not complete successfully; check Cloud Build logs.",
             "Cloud Build failed to fulfill Cloud Deploy's request. See failure_message for additional details.",
-            "The render operation did not complete successfully because the verification stanza required for verify was not found on the skaffold configuration.",
-            "The render operation did not complete successfully because the custom action required for predeploy or postdeploy was not found in the skaffold configuration. See failure_message for additional details.",
-            "Release failed during rendering because the release configuration is not supported with the specified deployment strategy."
+            "The render operation did not complete successfully because the verification stanza required for verify was not found on the Skaffold configuration.",
+            "The render operation did not complete successfully because the custom action required for predeploy or postdeploy was not found in the Skaffold configuration. See failure_message for additional details.",
+            "Release failed during rendering because the release configuration is not supported with the specified deployment strategy.",
+            "The render operation had a feature configured that is not supported."
           ],
           "readOnly": true,
           "type": "string"
@@ -5447,7 +5930,7 @@
       "type": "object"
     },
     "TargetsPresentCondition": {
-      "description": "TargetsPresentCondition contains information on any Targets defined in the Delivery Pipeline that do not actually exist.",
+      "description": "`TargetsPresentCondition` contains information on any Targets referenced in the Delivery Pipeline that do not actually exist.",
       "id": "TargetsPresentCondition",
       "properties": {
         "missingTargets": {
diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json
index a3ff0b406f2..36728a9b998 100644
--- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json
@@ -1828,7 +1828,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231130",
   "rootUrl": "https://cloudkms.googleapis.com/",
   "schemas": {
     "AsymmetricDecryptRequest": {
diff --git a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json
index 3e6abccb58b..a98a7eced18 100644
--- a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json
@@ -216,7 +216,7 @@
       }
     }
   },
-  "revision": "20231127",
+  "revision": "20231205",
   "rootUrl": "https://cloudprofiler.googleapis.com/",
   "schemas": {
     "CreateProfileRequest": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json
index 111b6658a3b..ce8cf3ec78e 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json
@@ -1171,7 +1171,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231207",
   "rootUrl": "https://cloudresourcemanager.googleapis.com/",
   "schemas": {
     "Ancestor": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json
index 6c0aea16bef..2858e8fb804 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json
@@ -568,7 +568,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231207",
   "rootUrl": "https://cloudresourcemanager.googleapis.com/",
   "schemas": {
     "Ancestor": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json
index 8805bfe92ba..65dde741717 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json
@@ -450,7 +450,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231207",
   "rootUrl": "https://cloudresourcemanager.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json
index 73f6cc93607..ed845985095 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json
@@ -450,7 +450,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231207",
   "rootUrl": "https://cloudresourcemanager.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json
index adef10ff10d..8dabb8acb71 100644
--- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json
+++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json
@@ -1805,7 +1805,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231207",
   "rootUrl": "https://cloudresourcemanager.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json
index 07a2f71b151..4b56e484834 100644
--- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json
@@ -418,7 +418,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://cloudscheduler.googleapis.com/",
   "schemas": {
     "AppEngineHttpTarget": {
@@ -438,7 +438,7 @@
           "additionalProperties": {
             "type": "string"
           },
-          "description": "HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `\"AppEngine-Google; (+http://code.google.com/appengine)\"`. This header can be modified, but Cloud Scheduler will append `\"AppEngine-Google; (+http://code.google.com/appengine)\"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC \"Zulu\" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler.",
+          "description": "HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `\"AppEngine-Google; (+http://code.google.com/appengine)\"`. This header can be modified, but Cloud Scheduler will append `\"AppEngine-Google; (+http://code.google.com/appengine)\"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also sent to the job handler.",
           "type": "object"
         },
         "httpMethod": {
@@ -514,7 +514,7 @@
           "additionalProperties": {
             "type": "string"
           },
-          "description": "HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `\"Google-Cloud-Scheduler\"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC \"Zulu\" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The total size of headers must be less than 80KB.",
+          "description": "HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `\"Google-Cloud-Scheduler\"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The total size of headers must be less than 80KB.",
           "type": "object"
         },
         "httpMethod": {
diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json
index 2c7cecd2e40..4d92c998c63 100644
--- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json
@@ -433,7 +433,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://cloudscheduler.googleapis.com/",
   "schemas": {
     "AppEngineHttpTarget": {
@@ -453,7 +453,7 @@
           "additionalProperties": {
             "type": "string"
           },
-          "description": "HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `\"AppEngine-Google; (+http://code.google.com/appengine)\"`. This header can be modified, but Cloud Scheduler will append `\"AppEngine-Google; (+http://code.google.com/appengine)\"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC \"Zulu\" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also be sent to the job handler.",
+          "description": "HTTP request headers. This map contains the header field names and values. Headers can be set when the job is created. Cloud Scheduler sets some headers to default values: * `User-Agent`: By default, this header is `\"AppEngine-Google; (+http://code.google.com/appengine)\"`. This header can be modified, but Cloud Scheduler will append `\"AppEngine-Google; (+http://code.google.com/appengine)\"` to the modified `User-Agent`. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The headers below are output only. They cannot be set or overridden: * `Content-Length`: This is computed by Cloud Scheduler. * `X-Google-*`: For Google internal use only. * `X-AppEngine-*`: For Google internal use only. In addition, some App Engine headers, which contain job-specific information, are also sent to the job handler.",
           "type": "object"
         },
         "httpMethod": {
@@ -529,7 +529,7 @@
           "additionalProperties": {
             "type": "string"
           },
-          "description": "HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `\"Google-Cloud-Scheduler\"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule time in RFC3339 UTC \"Zulu\" format. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The total size of headers must be less than 80KB.",
+          "description": "HTTP request headers. This map contains the header field names and values. The user can specify HTTP request headers to send with the job's HTTP request. Repeated headers are not supported, but a header value can contain commas. The following headers represent a subset of the headers that accompany the job's HTTP request. Some HTTP request headers are ignored or replaced. A partial list of headers that are ignored or replaced is below: * Host: This will be computed by Cloud Scheduler and derived from uri. * `Content-Length`: This will be computed by Cloud Scheduler. * `User-Agent`: This will be set to `\"Google-Cloud-Scheduler\"`. * `X-Google-*`: Google internal use only. * `X-AppEngine-*`: Google internal use only. * `X-CloudScheduler`: This header will be set to true. * `X-CloudScheduler-JobName`: This header will contain the job name. * `X-CloudScheduler-ScheduleTime`: For Cloud Scheduler jobs specified in the unix-cron format, this header will contain the job schedule as an offset of UTC parsed according to RFC3339. If the job has a body and the following headers are not set by the user, Cloud Scheduler sets default values: * `Content-Type`: This will be set to `\"application/octet-stream\"`. You can override this default by explicitly setting `Content-Type` to a particular media type when creating the job. For example, you can set `Content-Type` to `\"application/json\"`. The total size of headers must be less than 80KB.",
           "type": "object"
         },
         "httpMethod": {
diff --git a/googleapiclient/discovery_cache/documents/cloudshell.v1.json b/googleapiclient/discovery_cache/documents/cloudshell.v1.json
index 2a7150b7ac9..2fe0648bb7e 100644
--- a/googleapiclient/discovery_cache/documents/cloudshell.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudshell.v1.json
@@ -374,7 +374,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231211",
   "rootUrl": "https://cloudshell.googleapis.com/",
   "schemas": {
     "AddPublicKeyMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json
index 9436f326c6b..d836353220b 100644
--- a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json
@@ -552,7 +552,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231207",
   "rootUrl": "https://cloudsupport.googleapis.com/",
   "schemas": {
     "Actor": {
diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json
index 447989fdb32..bd9c6aa5813 100644
--- a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json
+++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json
@@ -548,7 +548,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231207",
   "rootUrl": "https://cloudsupport.googleapis.com/",
   "schemas": {
     "Actor": {
diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2.json
index 6801a27079b..d81cfd3253c 100644
--- a/googleapiclient/discovery_cache/documents/cloudtasks.v2.json
+++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2.json
@@ -779,7 +779,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://cloudtasks.googleapis.com/",
   "schemas": {
     "AppEngineHttpRequest": {
@@ -962,7 +962,7 @@
         },
         "task": {
           "$ref": "Task",
-          "description": "Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. If the task's queue was created using Cloud Tasks, then another task with the same name can't be created for ~1hour after the original task was deleted or executed. If the task's queue was created using queue.yaml or queue.xml, then another task with the same name can't be created for ~9days after the original task was deleted or executed. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently."
+          "description": "Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. The IDs of deleted tasks are not immediately available for reuse. It can take up to 4 hours (or 9 days if the task's queue was created using a queue.yaml or queue.xml) for the task ID to be released and made available again. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently."
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json
index 3c86068a936..99bd30f5a9d 100644
--- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json
+++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json
@@ -935,7 +935,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://cloudtasks.googleapis.com/",
   "schemas": {
     "AcknowledgeTaskRequest": {
@@ -1167,7 +1167,7 @@
         },
         "task": {
           "$ref": "Task",
-          "description": "Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or completed recently then the call will fail with ALREADY_EXISTS. If the task's queue was created using Cloud Tasks, then another task with the same name can't be created for ~1 hour after the original task was deleted or completed. If the task's queue was created using queue.yaml or queue.xml, then another task with the same name can't be created for ~9 days after the original task was deleted or completed. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently."
+          "description": "Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or completed recently then the call will fail with ALREADY_EXISTS. The IDs of deleted tasks are not immediately available for reuse. It can take up to 4 hours (or 9 days if the task's queue was created using a queue.yaml or queue.xml) for the task ID to be released and made available again. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently."
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json
index 7fbd977f6c7..3901fe55b46 100644
--- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json
+++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json
@@ -791,7 +791,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://cloudtasks.googleapis.com/",
   "schemas": {
     "AppEngineHttpQueue": {
@@ -985,7 +985,7 @@
         },
         "task": {
           "$ref": "Task",
-          "description": "Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. If the task's queue was created using Cloud Tasks, then another task with the same name can't be created for ~1 hour after the original task was deleted or executed. If the task's queue was created using queue.yaml or queue.xml, then another task with the same name can't be created for ~9 days after the original task was deleted or executed. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently."
+          "description": "Required. The task to add. Task names have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`. The user can optionally specify a task name. If a name is not specified then the system will generate a random unique task id, which will be set in the task returned in the response. If schedule_time is not set or is in the past then Cloud Tasks will set it to the current time. Task De-duplication: Explicitly specifying a task ID enables task de-duplication. If a task's ID is identical to that of an existing task or a task that was deleted or executed recently then the call will fail with ALREADY_EXISTS. The IDs of deleted tasks are not immediately available for reuse. It can take up to 4 hours (or 9 days if the task's queue was created using a queue.yaml or queue.xml) for the task ID to be released and made available again. Because there is an extra lookup cost to identify duplicate task names, these CreateTask calls have significantly increased latency. Using hashed strings for the task id or for the prefix of the task id is recommended. Choosing task ids that are sequential or have sequential prefixes, for example using a timestamp, causes an increase in latency and error rates in all task commands. The infrastructure relies on an approximately uniform distribution of task ids to store and serve tasks efficiently."
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json
index 454ead9191f..cdf126f53c9 100644
--- a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json
+++ b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json
@@ -257,7 +257,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231130",
   "rootUrl": "https://cloudtrace.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json
index be29f2be69c..eb6a78a0d41 100644
--- a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json
+++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json
@@ -273,7 +273,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231130",
   "rootUrl": "https://cloudtrace.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/composer.v1.json b/googleapiclient/discovery_cache/documents/composer.v1.json
index 1188bebe682..92307d3092b 100644
--- a/googleapiclient/discovery_cache/documents/composer.v1.json
+++ b/googleapiclient/discovery_cache/documents/composer.v1.json
@@ -599,7 +599,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231205",
   "rootUrl": "https://composer.googleapis.com/",
   "schemas": {
     "AllowedIpRange": {
diff --git a/googleapiclient/discovery_cache/documents/composer.v1beta1.json b/googleapiclient/discovery_cache/documents/composer.v1beta1.json
index c17279c70e8..e9e95a19c5f 100644
--- a/googleapiclient/discovery_cache/documents/composer.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/composer.v1beta1.json
@@ -655,7 +655,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231205",
   "rootUrl": "https://composer.googleapis.com/",
   "schemas": {
     "AllowedIpRange": {
diff --git a/googleapiclient/discovery_cache/documents/connectors.v1.json b/googleapiclient/discovery_cache/documents/connectors.v1.json
index 843e0013989..ed67dc5e8ec 100644
--- a/googleapiclient/discovery_cache/documents/connectors.v1.json
+++ b/googleapiclient/discovery_cache/documents/connectors.v1.json
@@ -2349,7 +2349,7 @@
       }
     }
   },
-  "revision": "20231126",
+  "revision": "20231205",
   "rootUrl": "https://connectors.googleapis.com/",
   "schemas": {
     "AuditConfig": {
@@ -2625,6 +2625,22 @@
           "description": "Key of the config variable.",
           "type": "string"
         },
+        "locationType": {
+          "description": "Optional. Location Type denotes where this value should be sent in BYOC connections.",
+          "enum": [
+            "LOCATION_TYPE_UNSPECIFIED",
+            "HEADER",
+            "PAYLOAD",
+            "QUERY_PARAM"
+          ],
+          "enumDescriptions": [
+            "Location type unspecified.",
+            "Request header.",
+            "Request Payload.",
+            "Request query param."
+          ],
+          "type": "string"
+        },
         "required": {
           "description": "Flag represents that this `ConfigVariable` must be provided for a connection.",
           "type": "boolean"
@@ -3211,6 +3227,26 @@
           "description": "Output only. Information about the runtime features supported by the Connector.",
           "readOnly": true
         },
+        "unsupportedConnectionTypes": {
+          "description": "Output only. Unsupported connection types.",
+          "items": {
+            "enum": [
+              "CONNECTION_TYPE_UNSPECIFIED",
+              "CONNECTION_WITH_EVENTING",
+              "ONLY_CONNECTION",
+              "ONLY_EVENTING"
+            ],
+            "enumDescriptions": [
+              "Connection type is unspecified.",
+              "Connection with eventing.",
+              "Only connection.",
+              "Only eventing."
+            ],
+            "type": "string"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
         "updateTime": {
           "description": "Output only. Updated time.",
           "format": "google-datetime",
@@ -3338,7 +3374,14 @@
       "properties": {
         "authConfig": {
           "$ref": "AuthConfig",
-          "description": "Required. Configuration for establishing the authentication to the connector destination."
+          "description": "Optional. Authentication config for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true."
+        },
+        "backendVariableTemplates": {
+          "description": "Optional. Backend variables config templates. This translates to additional variable templates in connection.",
+          "items": {
+            "$ref": "ConfigVariableTemplate"
+          },
+          "type": "array"
         },
         "createTime": {
           "description": "Output only. Created time.",
@@ -3346,12 +3389,15 @@
           "readOnly": true,
           "type": "string"
         },
-        "destinationConfig": {
-          "$ref": "DestinationConfig",
-          "description": "Required. Configuration of the customConnector's destination."
+        "destinationConfigs": {
+          "description": "Optional. Destination config(s) for accessing connector facade/ proxy. This is used only when enable_backend_destination_config is true.",
+          "items": {
+            "$ref": "DestinationConfig"
+          },
+          "type": "array"
         },
         "enableBackendDestinationConfig": {
-          "description": "Optional. Whether to enable backend destination config. This is the backend server that the connector connects to.",
+          "description": "Optional. When enabled, the connector will be a facade/ proxy, and connects to the destination provided during connection creation.",
           "type": "boolean"
         },
         "labels": {
@@ -3367,7 +3413,7 @@
           "type": "string"
         },
         "serviceAccount": {
-          "description": "Required. Service account needed for runtime plane to access Custom Connector secrets.",
+          "description": "Required. Service account used by runtime plane to access auth config secrets.",
           "type": "string"
         },
         "specLocation": {
diff --git a/googleapiclient/discovery_cache/documents/connectors.v2.json b/googleapiclient/discovery_cache/documents/connectors.v2.json
index 1f23a4407ca..7d9474063cb 100644
--- a/googleapiclient/discovery_cache/documents/connectors.v2.json
+++ b/googleapiclient/discovery_cache/documents/connectors.v2.json
@@ -111,6 +111,81 @@
           "resources": {
             "connections": {
               "methods": {
+                "checkReadiness": {
+                  "description": "Reports readiness status of the connector. Similar logic to GetStatus but modified for kubernetes health check to understand.",
+                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/connections/{connectionsId}:checkReadiness",
+                  "httpMethod": "GET",
+                  "id": "connectors.projects.locations.connections.checkReadiness",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/connections/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v2/{+name}:checkReadiness",
+                  "response": {
+                    "$ref": "CheckReadinessResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "checkStatus": {
+                  "description": "Reports the status of the connection. Note that when the connection is in a state that is not ACTIVE, the implementation of this RPC method must return a Status with the corresponding State instead of returning a gRPC status code that is not \"OK\", which indicates that ConnectionStatus itself, not the connection, failed.",
+                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/connections/{connectionsId}:checkStatus",
+                  "httpMethod": "GET",
+                  "id": "connectors.projects.locations.connections.checkStatus",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/connections/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v2/{+name}:checkStatus",
+                  "response": {
+                    "$ref": "CheckStatusResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "exchangeAuthCode": {
+                  "description": "ExchangeAuthCode exchanges the OAuth authorization code (and other necessary data) for an access token (and associated credentials).",
+                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/connections/{connectionsId}:exchangeAuthCode",
+                  "httpMethod": "POST",
+                  "id": "connectors.projects.locations.connections.exchangeAuthCode",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/connections/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v2/{+name}:exchangeAuthCode",
+                  "request": {
+                    "$ref": "ExchangeAuthCodeRequest"
+                  },
+                  "response": {
+                    "$ref": "ExchangeAuthCodeResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
                 "executeSqlQuery": {
                   "description": "Executes a SQL statement specified in the body of the request. An example of this SQL statement in the case of Salesforce connector would be 'select * from Account a, Order o where a.Id = o.AccountId'.",
                   "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/connections/{connectionsId}:executeSqlQuery",
@@ -138,6 +213,33 @@
                   "scopes": [
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
+                },
+                "refreshAccessToken": {
+                  "description": "RefreshAccessToken exchanges the OAuth refresh token (and other necessary data) for a new access token (and new associated credentials).",
+                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/connections/{connectionsId}:refreshAccessToken",
+                  "httpMethod": "POST",
+                  "id": "connectors.projects.locations.connections.refreshAccessToken",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/connections/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v2/{+name}:refreshAccessToken",
+                  "request": {
+                    "$ref": "RefreshAccessTokenRequest"
+                  },
+                  "response": {
+                    "$ref": "RefreshAccessTokenResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
                 }
               },
               "resources": {
@@ -558,9 +660,29 @@
       }
     }
   },
-  "revision": "20231126",
+  "revision": "20231205",
   "rootUrl": "https://connectors.googleapis.com/",
   "schemas": {
+    "AccessCredentials": {
+      "description": "AccessCredentials includes the OAuth access token, and the other fields returned along with it.",
+      "id": "AccessCredentials",
+      "properties": {
+        "accessToken": {
+          "description": "OAuth access token.",
+          "type": "string"
+        },
+        "expiresIn": {
+          "description": "Duration till the access token expires.",
+          "format": "google-duration",
+          "type": "string"
+        },
+        "refreshToken": {
+          "description": "OAuth refresh token.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "Action": {
       "description": "Action message contains metadata information about a single action present in the external system.",
       "id": "Action",
@@ -602,6 +724,43 @@
       },
       "type": "object"
     },
+    "CheckReadinessResponse": {
+      "description": "Response containing status of the connector for readiness prober.",
+      "id": "CheckReadinessResponse",
+      "properties": {
+        "status": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "CheckStatusResponse": {
+      "description": "The status of the connector.",
+      "id": "CheckStatusResponse",
+      "properties": {
+        "description": {
+          "description": "When the connector is not in ACTIVE state, the description must be populated to specify the reason why it's not in ACTIVE state.",
+          "type": "string"
+        },
+        "state": {
+          "description": "State of the connector.",
+          "enum": [
+            "STATE_UNSPECIFIED",
+            "ACTIVE",
+            "ERROR",
+            "AUTH_ERROR"
+          ],
+          "enumDescriptions": [
+            "State unspecified.",
+            "The connector is active and ready to process runtime requests. This can also mean that from the connector's perspective, the connector is not in an error state and should be able to process runtime requests successfully.",
+            "The connector is in an error state and cannot process runtime requests. An example reason would be that the connection container has some network issues that prevent outbound requests from being sent.",
+            "This is a more specific error state that the developers can opt to use when the connector is facing auth-related errors caused by auth configuration not present, invalid auth credentials, etc."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "Empty": {
       "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }",
       "id": "Empty",
@@ -649,6 +808,22 @@
       },
       "type": "object"
     },
+    "ExchangeAuthCodeRequest": {
+      "description": "ExchangeAuthCodeRequest currently includes no fields.",
+      "id": "ExchangeAuthCodeRequest",
+      "properties": {},
+      "type": "object"
+    },
+    "ExchangeAuthCodeResponse": {
+      "description": "ExchangeAuthCodeResponse includes the returned access token and its associated credentials.",
+      "id": "ExchangeAuthCodeResponse",
+      "properties": {
+        "accessCredentials": {
+          "$ref": "AccessCredentials"
+        }
+      },
+      "type": "object"
+    },
     "ExecuteActionRequest": {
       "description": "Request message for ActionService.ExecuteAction",
       "id": "ExecuteActionRequest",
@@ -1547,6 +1722,22 @@
       },
       "type": "object"
     },
+    "RefreshAccessTokenRequest": {
+      "description": "RefreshAccessTokenRequest currently includes no fields.",
+      "id": "RefreshAccessTokenRequest",
+      "properties": {},
+      "type": "object"
+    },
+    "RefreshAccessTokenResponse": {
+      "description": "RefreshAccessTokenResponse includes the returned access token and its associated credentials.",
+      "id": "RefreshAccessTokenResponse",
+      "properties": {
+        "accessCredentials": {
+          "$ref": "AccessCredentials"
+        }
+      },
+      "type": "object"
+    },
     "ResultMetadata": {
       "description": "Result Metadata message contains metadata about the result returned after executing an Action.",
       "id": "ResultMetadata",
diff --git a/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json b/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json
index 08e87d43144..590b9edc16f 100644
--- a/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json
+++ b/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json
@@ -1412,7 +1412,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://contactcenterinsights.googleapis.com/",
   "schemas": {
     "GoogleCloudContactcenterinsightsV1Analysis": {
diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json
index 2df5f7e8a47..795ccdf8707 100644
--- a/googleapiclient/discovery_cache/documents/container.v1.json
+++ b/googleapiclient/discovery_cache/documents/container.v1.json
@@ -2540,7 +2540,7 @@
       }
     }
   },
-  "revision": "20231030",
+  "revision": "20231201",
   "rootUrl": "https://container.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -2687,6 +2687,10 @@
           "description": "Expose flow metrics on nodes",
           "type": "boolean"
         },
+        "enableRelay": {
+          "description": "Enable Relay component",
+          "type": "boolean"
+        },
         "relayMode": {
           "description": "Method used to make Relay available",
           "enum": [
@@ -6318,12 +6322,14 @@
           "enum": [
             "VULNERABILITY_MODE_UNSPECIFIED",
             "VULNERABILITY_DISABLED",
-            "VULNERABILITY_BASIC"
+            "VULNERABILITY_BASIC",
+            "VULNERABILITY_ENTERPRISE"
           ],
           "enumDescriptions": [
             "Default value not specified.",
             "Disables vulnerability scanning on the cluster.",
-            "Applies basic vulnerability scanning on the cluster."
+            "Applies basic vulnerability scanning on the cluster.",
+            "Applies the Security Posture's vulnerability on cluster Enterprise level features."
           ],
           "type": "string"
         }
diff --git a/googleapiclient/discovery_cache/documents/container.v1beta1.json b/googleapiclient/discovery_cache/documents/container.v1beta1.json
index fc4fe066518..0f1be491470 100644
--- a/googleapiclient/discovery_cache/documents/container.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/container.v1beta1.json
@@ -2565,7 +2565,7 @@
       }
     }
   },
-  "revision": "20231030",
+  "revision": "20231122",
   "rootUrl": "https://container.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -2728,6 +2728,10 @@
           "description": "Expose flow metrics on nodes",
           "type": "boolean"
         },
+        "enableRelay": {
+          "description": "Enable Relay component",
+          "type": "boolean"
+        },
         "relayMode": {
           "description": "Method used to make Relay available",
           "enum": [
@@ -2996,8 +3000,8 @@
             "Default value",
             "Disable BinaryAuthorization",
             "Enforce Kubernetes admission requests with BinaryAuthorization using the project's singleton policy. This is equivalent to setting the enabled boolean to true.",
-            "Use Binary Authorization with the policies specified in policy_bindings.",
-            "Use Binary Authorization with the policies specified in policy_bindings, and also with the project's singleton policy in enforcement mode."
+            "Use Binary Authorization Continuous Validation with the policies specified in policy_bindings.",
+            "Use Binary Authorization Continuous Validation with the policies specified in policy_bindings and enforce Kubernetes admission requests with Binary Authorization using the project's singleton policy."
           ],
           "type": "string"
         },
diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1.json
index 95e938cabee..85e56d61464 100644
--- a/googleapiclient/discovery_cache/documents/containeranalysis.v1.json
+++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1.json
@@ -755,7 +755,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://containeranalysis.googleapis.com/",
   "schemas": {
     "AliasContext": {
diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json
index 51bd34f02f0..2dd65b34913 100644
--- a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json
@@ -1233,7 +1233,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://containeranalysis.googleapis.com/",
   "schemas": {
     "AnalysisCompleted": {
diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json
index dafdba1155f..44c10b40e5b 100644
--- a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json
@@ -815,7 +815,7 @@
       }
     }
   },
-  "revision": "20231103",
+  "revision": "20231201",
   "rootUrl": "https://containeranalysis.googleapis.com/",
   "schemas": {
     "AliasContext": {
diff --git a/googleapiclient/discovery_cache/documents/content.v2.1.json b/googleapiclient/discovery_cache/documents/content.v2.1.json
index 8289cbe856b..26df9553545 100644
--- a/googleapiclient/discovery_cache/documents/content.v2.1.json
+++ b/googleapiclient/discovery_cache/documents/content.v2.1.json
@@ -6186,7 +6186,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://shoppingcontent.googleapis.com/",
   "schemas": {
     "Account": {
diff --git a/googleapiclient/discovery_cache/documents/customsearch.v1.json b/googleapiclient/discovery_cache/documents/customsearch.v1.json
index 2f594b27d9f..ad91e2dc396 100644
--- a/googleapiclient/discovery_cache/documents/customsearch.v1.json
+++ b/googleapiclient/discovery_cache/documents/customsearch.v1.json
@@ -688,7 +688,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://customsearch.googleapis.com/",
   "schemas": {
     "Promotion": {
diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1.json
index 3dbed557f6b..d532629e77e 100644
--- a/googleapiclient/discovery_cache/documents/datacatalog.v1.json
+++ b/googleapiclient/discovery_cache/documents/datacatalog.v1.json
@@ -1791,7 +1791,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs.",
+                      "description": "Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs.",
                       "location": "path",
                       "pattern": "^projects/[^/]+/locations/[^/]+/taxonomies/[^/]+$",
                       "required": true,
@@ -2055,7 +2055,7 @@
                       ],
                       "parameters": {
                         "name": {
-                          "description": "Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs.",
+                          "description": "Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs.",
                           "location": "path",
                           "pattern": "^projects/[^/]+/locations/[^/]+/taxonomies/[^/]+/policyTags/[^/]+$",
                           "required": true,
@@ -2144,7 +2144,7 @@
       }
     }
   },
-  "revision": "20231127",
+  "revision": "20231205",
   "rootUrl": "https://datacatalog.googleapis.com/",
   "schemas": {
     "Binding": {
@@ -3516,8 +3516,7 @@
           "type": "string"
         },
         "name": {
-          "description": "Output only. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs.",
-          "readOnly": true,
+          "description": "Identifier. Resource name of this policy tag in the URL format. The policy tag manager generates unique taxonomy IDs and policy tag IDs.",
           "type": "string"
         },
         "parentPolicyTag": {
@@ -4267,8 +4266,7 @@
           "type": "string"
         },
         "name": {
-          "description": "Output only. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs.",
-          "readOnly": true,
+          "description": "Identifier. Resource name of this taxonomy in URL format. Note: Policy tag manager generates unique taxonomy IDs.",
           "type": "string"
         },
         "policyTagCount": {
diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json
index 1fe2d1d588a..5b411c21fa0 100644
--- a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json
@@ -1488,7 +1488,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Output only. Resource name of this taxonomy, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{id}\".",
+                      "description": "Identifier. Resource name of this taxonomy, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{id}\".",
                       "location": "path",
                       "pattern": "^projects/[^/]+/locations/[^/]+/taxonomies/[^/]+$",
                       "required": true,
@@ -1724,7 +1724,7 @@
                       ],
                       "parameters": {
                         "name": {
-                          "description": "Output only. Resource name of this policy tag, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}\".",
+                          "description": "Identifier. Resource name of this policy tag, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}\".",
                           "location": "path",
                           "pattern": "^projects/[^/]+/locations/[^/]+/taxonomies/[^/]+/policyTags/[^/]+$",
                           "required": true,
@@ -1813,7 +1813,7 @@
       }
     }
   },
-  "revision": "20231127",
+  "revision": "20231205",
   "rootUrl": "https://datacatalog.googleapis.com/",
   "schemas": {
     "Binding": {
@@ -3861,8 +3861,7 @@
           "type": "string"
         },
         "name": {
-          "description": "Output only. Resource name of this policy tag, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}\".",
-          "readOnly": true,
+          "description": "Identifier. Resource name of this policy tag, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/policyTags/{id}\".",
           "type": "string"
         },
         "parentPolicyTag": {
@@ -4301,8 +4300,7 @@
           "type": "string"
         },
         "name": {
-          "description": "Output only. Resource name of this taxonomy, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{id}\".",
-          "readOnly": true,
+          "description": "Identifier. Resource name of this taxonomy, whose format is: \"projects/{project_number}/locations/{location_id}/taxonomies/{id}\".",
           "type": "string"
         },
         "policyTagCount": {
diff --git a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json
index 299efc1ec42..c44f02bb916 100644
--- a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json
+++ b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json
@@ -2221,7 +2221,7 @@
       }
     }
   },
-  "revision": "20231130",
+  "revision": "20231203",
   "rootUrl": "https://dataflow.googleapis.com/",
   "schemas": {
     "ApproximateProgress": {
@@ -6503,6 +6503,43 @@
       },
       "type": "object"
     },
+    "StreamingScalingReport": {
+      "description": "Contains per-user worker telemetry used in streaming autoscaling.",
+      "id": "StreamingScalingReport",
+      "properties": {
+        "activeBundleCount": {
+          "description": "Current active bundle count.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "activeThreadCount": {
+          "description": "Current acive thread count.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "maximumBundleCount": {
+          "description": "Maximum bundle count limit.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "maximumBytesCount": {
+          "description": "Maximum bytes count limit.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "maximumThreadCount": {
+          "description": "Maximum thread count limit.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "outstandingBytesCount": {
+          "description": "Current outstanding bytes count.",
+          "format": "int32",
+          "type": "integer"
+        }
+      },
+      "type": "object"
+    },
     "StreamingSetupTask": {
       "description": "A task which initializes part of a streaming Dataflow job.",
       "id": "StreamingSetupTask",
@@ -7236,6 +7273,10 @@
           "description": "Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { \"JOB_ID\": \"2015-04-22\", \"WORKER_ID\": \"wordcount-vm-2015\u2026\" \"CONTAINER_TYPE\": \"worker\", \"CONTAINER_ID\": \"ac1234def\"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here.",
           "type": "object"
         },
+        "streamingScalingReport": {
+          "$ref": "StreamingScalingReport",
+          "description": "Contains per-user worker telemetry used in streaming autoscaling."
+        },
         "time": {
           "description": "The timestamp of the worker_message.",
           "format": "google-datetime",
diff --git a/googleapiclient/discovery_cache/documents/dataform.v1beta1.json b/googleapiclient/discovery_cache/documents/dataform.v1beta1.json
index 2e05fcc9280..9967ec913ca 100644
--- a/googleapiclient/discovery_cache/documents/dataform.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/dataform.v1beta1.json
@@ -1969,7 +1969,7 @@
                       ],
                       "parameters": {
                         "filter": {
-                          "description": "Optional. Optional filter for the returned list in go/filtering format. Filtering is only currently supported on the `path` field.",
+                          "description": "Optional. Optional filter for the returned list in filtering format. Filtering is only currently supported on the `path` field. See https://google.aip.dev/160 for details.",
                           "location": "query",
                           "type": "string"
                         },
@@ -2093,7 +2093,7 @@
       }
     }
   },
-  "revision": "20231111",
+  "revision": "20231203",
   "rootUrl": "https://dataform.googleapis.com/",
   "schemas": {
     "Assertion": {
diff --git a/googleapiclient/discovery_cache/documents/datalineage.v1.json b/googleapiclient/discovery_cache/documents/datalineage.v1.json
index ec0e034a881..3ba389ca226 100644
--- a/googleapiclient/discovery_cache/documents/datalineage.v1.json
+++ b/googleapiclient/discovery_cache/documents/datalineage.v1.json
@@ -798,7 +798,7 @@
       }
     }
   },
-  "revision": "20231027",
+  "revision": "20231201",
   "rootUrl": "https://datalineage.googleapis.com/",
   "schemas": {
     "GoogleCloudDatacatalogLineageV1BatchSearchLinkProcessesRequest": {
diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json
index 10c4252fde9..5ca7cd9ba6d 100644
--- a/googleapiclient/discovery_cache/documents/datamigration.v1.json
+++ b/googleapiclient/discovery_cache/documents/datamigration.v1.json
@@ -2125,7 +2125,7 @@
       }
     }
   },
-  "revision": "20231111",
+  "revision": "20231204",
   "rootUrl": "https://datamigration.googleapis.com/",
   "schemas": {
     "AlloyDbConnectionProfile": {
diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json
index 362236e507d..5b6a1cff8a8 100644
--- a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json
@@ -1049,7 +1049,7 @@
       }
     }
   },
-  "revision": "20231111",
+  "revision": "20231204",
   "rootUrl": "https://datamigration.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/datapipelines.v1.json b/googleapiclient/discovery_cache/documents/datapipelines.v1.json
index db40b40b3fd..72f26da94f0 100644
--- a/googleapiclient/discovery_cache/documents/datapipelines.v1.json
+++ b/googleapiclient/discovery_cache/documents/datapipelines.v1.json
@@ -369,7 +369,7 @@
       }
     }
   },
-  "revision": "20231112",
+  "revision": "20231203",
   "rootUrl": "https://datapipelines.googleapis.com/",
   "schemas": {
     "GoogleCloudDatapipelinesV1DataflowJobDetails": {
diff --git a/googleapiclient/discovery_cache/documents/datastream.v1.json b/googleapiclient/discovery_cache/documents/datastream.v1.json
index a7a94ca50b9..b8a75f43d92 100644
--- a/googleapiclient/discovery_cache/documents/datastream.v1.json
+++ b/googleapiclient/discovery_cache/documents/datastream.v1.json
@@ -1250,7 +1250,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://datastream.googleapis.com/",
   "schemas": {
     "AvroFileFormat": {
@@ -3046,13 +3046,15 @@
             "STATE_UNSPECIFIED",
             "NOT_EXECUTED",
             "FAILED",
-            "PASSED"
+            "PASSED",
+            "WARNING"
           ],
           "enumDescriptions": [
             "Unspecified state.",
             "Validation did not execute.",
             "Validation failed.",
-            "Validation passed."
+            "Validation passed.",
+            "Validation executed with warnings."
           ],
           "readOnly": true,
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/datastream.v1alpha1.json b/googleapiclient/discovery_cache/documents/datastream.v1alpha1.json
index cbfba1bec6e..18b11ff1ec2 100644
--- a/googleapiclient/discovery_cache/documents/datastream.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/datastream.v1alpha1.json
@@ -1224,7 +1224,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://datastream.googleapis.com/",
   "schemas": {
     "AvroFileFormat": {
diff --git a/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json b/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json
index 8d61ff1c07e..ff3bf4bef32 100644
--- a/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json
+++ b/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json
@@ -1588,7 +1588,7 @@
       }
     }
   },
-  "revision": "20231130",
+  "revision": "20231207",
   "rootUrl": "https://deploymentmanager.googleapis.com/",
   "schemas": {
     "AsyncOptions": {
diff --git a/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json b/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json
index dbe121af747..ecbc6470d35 100644
--- a/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json
+++ b/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json
@@ -199,7 +199,7 @@
       }
     }
   },
-  "revision": "20231202",
+  "revision": "20231205",
   "rootUrl": "https://digitalassetlinks.googleapis.com/",
   "schemas": {
     "AndroidAppAsset": {
diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v1.json b/googleapiclient/discovery_cache/documents/displayvideo.v1.json
index ef5d9a33bb5..2af89fc32ac 100644
--- a/googleapiclient/discovery_cache/documents/displayvideo.v1.json
+++ b/googleapiclient/discovery_cache/documents/displayvideo.v1.json
@@ -8146,7 +8146,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231207",
   "rootUrl": "https://displayvideo.googleapis.com/",
   "schemas": {
     "ActivateManualTriggerRequest": {
diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v2.json b/googleapiclient/discovery_cache/documents/displayvideo.v2.json
index 216d0762300..bb5632d361c 100644
--- a/googleapiclient/discovery_cache/documents/displayvideo.v2.json
+++ b/googleapiclient/discovery_cache/documents/displayvideo.v2.json
@@ -9172,7 +9172,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231207",
   "rootUrl": "https://displayvideo.googleapis.com/",
   "schemas": {
     "ActivateManualTriggerRequest": {
diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v3.json b/googleapiclient/discovery_cache/documents/displayvideo.v3.json
index c2393766f8a..6e7d44510be 100644
--- a/googleapiclient/discovery_cache/documents/displayvideo.v3.json
+++ b/googleapiclient/discovery_cache/documents/displayvideo.v3.json
@@ -9127,7 +9127,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231207",
   "rootUrl": "https://displayvideo.googleapis.com/",
   "schemas": {
     "ActiveViewVideoViewabilityMetricConfig": {
diff --git a/googleapiclient/discovery_cache/documents/dlp.v2.json b/googleapiclient/discovery_cache/documents/dlp.v2.json
index 26dfbf2878b..ffc3c361340 100644
--- a/googleapiclient/discovery_cache/documents/dlp.v2.json
+++ b/googleapiclient/discovery_cache/documents/dlp.v2.json
@@ -3714,7 +3714,7 @@
       }
     }
   },
-  "revision": "20231127",
+  "revision": "20231203",
   "rootUrl": "https://dlp.googleapis.com/",
   "schemas": {
     "GooglePrivacyDlpV2Action": {
diff --git a/googleapiclient/discovery_cache/documents/dns.v1.json b/googleapiclient/discovery_cache/documents/dns.v1.json
index 98e05e3554d..7b8b01c91b9 100644
--- a/googleapiclient/discovery_cache/documents/dns.v1.json
+++ b/googleapiclient/discovery_cache/documents/dns.v1.json
@@ -1824,7 +1824,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231209",
   "rootUrl": "https://dns.googleapis.com/",
   "schemas": {
     "Change": {
@@ -3025,6 +3025,10 @@
         "geo": {
           "$ref": "RRSetRoutingPolicyGeoPolicy"
         },
+        "healthCheck": {
+          "description": "The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks",
+          "type": "string"
+        },
         "kind": {
           "default": "dns#rRSetRoutingPolicy",
           "type": "string"
@@ -3093,10 +3097,18 @@
       "type": "object"
     },
     "RRSetRoutingPolicyHealthCheckTargets": {
-      "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response.",
+      "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set.",
       "id": "RRSetRoutingPolicyHealthCheckTargets",
       "properties": {
+        "externalEndpoints": {
+          "description": "The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1)",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
         "internalLoadBalancers": {
+          "description": "Configuration for internal load balancers to be health checked.",
           "items": {
             "$ref": "RRSetRoutingPolicyLoadBalancerTarget"
           },
diff --git a/googleapiclient/discovery_cache/documents/dns.v1beta2.json b/googleapiclient/discovery_cache/documents/dns.v1beta2.json
index c385ae52741..ce2500be4f5 100644
--- a/googleapiclient/discovery_cache/documents/dns.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/dns.v1beta2.json
@@ -1821,7 +1821,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231209",
   "rootUrl": "https://dns.googleapis.com/",
   "schemas": {
     "Change": {
@@ -3026,6 +3026,10 @@
           "$ref": "RRSetRoutingPolicyGeoPolicy",
           "deprecated": true
         },
+        "healthCheck": {
+          "description": "The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks",
+          "type": "string"
+        },
         "kind": {
           "default": "dns#rRSetRoutingPolicy",
           "type": "string"
@@ -3098,10 +3102,18 @@
       "type": "object"
     },
     "RRSetRoutingPolicyHealthCheckTargets": {
-      "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response.",
+      "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set.",
       "id": "RRSetRoutingPolicyHealthCheckTargets",
       "properties": {
+        "externalEndpoints": {
+          "description": "The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1)",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
         "internalLoadBalancers": {
+          "description": "Configuration for internal load balancers to be health checked.",
           "items": {
             "$ref": "RRSetRoutingPolicyLoadBalancerTarget"
           },
diff --git a/googleapiclient/discovery_cache/documents/docs.v1.json b/googleapiclient/discovery_cache/documents/docs.v1.json
index f4991393efe..4d41c4f3601 100644
--- a/googleapiclient/discovery_cache/documents/docs.v1.json
+++ b/googleapiclient/discovery_cache/documents/docs.v1.json
@@ -216,7 +216,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231205",
   "rootUrl": "https://docs.googleapis.com/",
   "schemas": {
     "AutoText": {
diff --git a/googleapiclient/discovery_cache/documents/documentai.v1.json b/googleapiclient/discovery_cache/documents/documentai.v1.json
index efe51821a9e..797fe5c879e 100644
--- a/googleapiclient/discovery_cache/documents/documentai.v1.json
+++ b/googleapiclient/discovery_cache/documents/documentai.v1.json
@@ -1042,7 +1042,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231208",
   "rootUrl": "https://documentai.googleapis.com/",
   "schemas": {
     "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json
index a26cf81dd4a..cef0457fbaf 100644
--- a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json
@@ -292,7 +292,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231208",
   "rootUrl": "https://documentai.googleapis.com/",
   "schemas": {
     "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json
index 4c2fb3a4a7f..80b933dd620 100644
--- a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json
+++ b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json
@@ -1284,7 +1284,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231208",
   "rootUrl": "https://documentai.googleapis.com/",
   "schemas": {
     "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json
index 20ba56d538c..ad32b9eb132 100644
--- a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json
+++ b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json
@@ -289,7 +289,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://domainsrdap.googleapis.com/",
   "schemas": {
     "HttpBody": {
diff --git a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json
index d20d8147eaf..10d2545fb2f 100644
--- a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json
+++ b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json
@@ -543,7 +543,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231206",
   "rootUrl": "https://doubleclicksearch.googleapis.com/",
   "schemas": {
     "Availability": {
diff --git a/googleapiclient/discovery_cache/documents/drive.v2.json b/googleapiclient/discovery_cache/documents/drive.v2.json
index ceb2077a291..b1bf4bcf3b8 100644
--- a/googleapiclient/discovery_cache/documents/drive.v2.json
+++ b/googleapiclient/discovery_cache/documents/drive.v2.json
@@ -3842,7 +3842,7 @@
       }
     }
   },
-  "revision": "20231120",
+  "revision": "20231201",
   "rootUrl": "https://www.googleapis.com/",
   "schemas": {
     "About": {
diff --git a/googleapiclient/discovery_cache/documents/drive.v3.json b/googleapiclient/discovery_cache/documents/drive.v3.json
index 114c7a540de..3a3a20e5817 100644
--- a/googleapiclient/discovery_cache/documents/drive.v3.json
+++ b/googleapiclient/discovery_cache/documents/drive.v3.json
@@ -2433,7 +2433,7 @@
       }
     }
   },
-  "revision": "20231120",
+  "revision": "20231201",
   "rootUrl": "https://www.googleapis.com/",
   "schemas": {
     "About": {
@@ -2998,7 +2998,7 @@
           "type": "string"
         },
         "restrictions": {
-          "description": "A set of restrictions that apply to this shared drive or items inside this shared drive.",
+          "description": "A set of restrictions that apply to this shared drive or items inside this shared drive. Note that restrictions can't be set when creating a shared drive. To add a restriction, first create a shared drive and then use `drives.update` to add restrictions.",
           "properties": {
             "adminManagedRestrictions": {
               "description": "Whether administrative privileges on this shared drive are required to modify restrictions.",
diff --git a/googleapiclient/discovery_cache/documents/driveactivity.v2.json b/googleapiclient/discovery_cache/documents/driveactivity.v2.json
index 8f8217f2dc5..b566696f6ff 100644
--- a/googleapiclient/discovery_cache/documents/driveactivity.v2.json
+++ b/googleapiclient/discovery_cache/documents/driveactivity.v2.json
@@ -132,7 +132,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://driveactivity.googleapis.com/",
   "schemas": {
     "Action": {
diff --git a/googleapiclient/discovery_cache/documents/eventarc.v1.json b/googleapiclient/discovery_cache/documents/eventarc.v1.json
index aeff4a49781..24373eda775 100644
--- a/googleapiclient/discovery_cache/documents/eventarc.v1.json
+++ b/googleapiclient/discovery_cache/documents/eventarc.v1.json
@@ -1197,7 +1197,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231207",
   "rootUrl": "https://eventarc.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json b/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json
index fbfe40b42ac..325ad2420af 100644
--- a/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json
@@ -584,7 +584,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231207",
   "rootUrl": "https://eventarc.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json
index 8581819ff22..303fd3c9850 100644
--- a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json
@@ -304,7 +304,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://factchecktools.googleapis.com/",
   "schemas": {
     "GoogleFactcheckingFactchecktoolsV1alpha1Claim": {
diff --git a/googleapiclient/discovery_cache/documents/fcm.v1.json b/googleapiclient/discovery_cache/documents/fcm.v1.json
index 27f27f7b584..953c90c54d3 100644
--- a/googleapiclient/discovery_cache/documents/fcm.v1.json
+++ b/googleapiclient/discovery_cache/documents/fcm.v1.json
@@ -146,7 +146,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231208",
   "rootUrl": "https://fcm.googleapis.com/",
   "schemas": {
     "AndroidConfig": {
diff --git a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json
index a1003704c66..a3b188c4fa1 100644
--- a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json
@@ -154,7 +154,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://fcmdata.googleapis.com/",
   "schemas": {
     "GoogleFirebaseFcmDataV1beta1AndroidDeliveryData": {
diff --git a/googleapiclient/discovery_cache/documents/file.v1.json b/googleapiclient/discovery_cache/documents/file.v1.json
index 905be5cfe5b..2a402d968da 100644
--- a/googleapiclient/discovery_cache/documents/file.v1.json
+++ b/googleapiclient/discovery_cache/documents/file.v1.json
@@ -557,7 +557,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Required. projects/{project_id}/locations/{location_id}/instances/{instance_id}. The resource name of the instance, in the format",
+                      "description": "Required. `projects/{project_id}/locations/{location_id}/instances/{instance_id}`. The resource name of the instance, in the format",
                       "location": "path",
                       "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$",
                       "required": true,
@@ -874,7 +874,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231203",
   "rootUrl": "https://file.googleapis.com/",
   "schemas": {
     "Backup": {
@@ -1073,7 +1073,7 @@
           "type": "string"
         },
         "name": {
-          "description": "Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable.",
+          "description": "Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable.",
           "type": "string"
         },
         "nfsExportOptions": {
@@ -1910,7 +1910,7 @@
       "id": "RevertInstanceRequest",
       "properties": {
         "targetSnapshotId": {
-          "description": "Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}",
+          "description": "Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like `projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}`",
           "type": "string"
         }
       },
diff --git a/googleapiclient/discovery_cache/documents/file.v1beta1.json b/googleapiclient/discovery_cache/documents/file.v1beta1.json
index 282df1bc60c..6d6690951fe 100644
--- a/googleapiclient/discovery_cache/documents/file.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/file.v1beta1.json
@@ -557,7 +557,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Required. projects/{project_id}/locations/{location_id}/instances/{instance_id}. The resource name of the instance, in the format",
+                      "description": "Required. `projects/{project_id}/locations/{location_id}/instances/{instance_id}`. The resource name of the instance, in the format",
                       "location": "path",
                       "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$",
                       "required": true,
@@ -1041,7 +1041,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231203",
   "rootUrl": "https://file.googleapis.com/",
   "schemas": {
     "Backup": {
@@ -1251,7 +1251,7 @@
           "type": "string"
         },
         "name": {
-          "description": "Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores [a-z0-9_]. Must start with a letter. Immutable.",
+          "description": "Required. The name of the file share. Must use 1-16 characters for the basic service tier and 1-63 characters for all other service tiers. Must use lowercase letters, numbers, or underscores `[a-z0-9_]`. Must start with a letter. Immutable.",
           "type": "string"
         },
         "nfsExportOptions": {
@@ -2198,7 +2198,7 @@
       "id": "RevertInstanceRequest",
       "properties": {
         "targetSnapshotId": {
-          "description": "Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}",
+          "description": "Required. The snapshot resource ID, in the format 'my-snapshot', where the specified ID is the {snapshot_id} of the fully qualified name like `projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}`",
           "type": "string"
         }
       },
diff --git a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json
index 4119778fb47..756cccb0f3a 100644
--- a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json
@@ -1324,7 +1324,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231207",
   "rootUrl": "https://firebase.googleapis.com/",
   "schemas": {
     "AddFirebaseRequest": {
diff --git a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json
index 00b69908808..0664eb5c76e 100644
--- a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json
+++ b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json
@@ -941,7 +941,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://firebaseappdistribution.googleapis.com/",
   "schemas": {
     "GdataBlobstore2Info": {
diff --git a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json
new file mode 100644
index 00000000000..11fe8fabb8a
--- /dev/null
+++ b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json
@@ -0,0 +1,1275 @@
+{
+  "auth": {
+    "oauth2": {
+      "scopes": {
+        "https://www.googleapis.com/auth/cloud-platform": {
+          "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account."
+        }
+      }
+    }
+  },
+  "basePath": "",
+  "baseUrl": "https://firebaseappdistribution.googleapis.com/",
+  "batchPath": "batch",
+  "canonicalName": "Firebase App Distribution",
+  "description": "",
+  "discoveryVersion": "v1",
+  "documentationLink": "https://firebase.google.com/products/app-distribution",
+  "fullyEncodeReservedExpansion": true,
+  "icons": {
+    "x16": "http://www.google.com/images/icons/product/search-16.gif",
+    "x32": "http://www.google.com/images/icons/product/search-32.gif"
+  },
+  "id": "firebaseappdistribution:v1alpha",
+  "kind": "discovery#restDescription",
+  "mtlsRootUrl": "https://firebaseappdistribution.mtls.googleapis.com/",
+  "name": "firebaseappdistribution",
+  "ownerDomain": "google.com",
+  "ownerName": "Google",
+  "parameters": {
+    "$.xgafv": {
+      "description": "V1 error format.",
+      "enum": [
+        "1",
+        "2"
+      ],
+      "enumDescriptions": [
+        "v1 error format",
+        "v2 error format"
+      ],
+      "location": "query",
+      "type": "string"
+    },
+    "access_token": {
+      "description": "OAuth access token.",
+      "location": "query",
+      "type": "string"
+    },
+    "alt": {
+      "default": "json",
+      "description": "Data format for response.",
+      "enum": [
+        "json",
+        "media",
+        "proto"
+      ],
+      "enumDescriptions": [
+        "Responses with Content-Type of application/json",
+        "Media download with context-dependent Content-Type",
+        "Responses with Content-Type of application/x-protobuf"
+      ],
+      "location": "query",
+      "type": "string"
+    },
+    "callback": {
+      "description": "JSONP",
+      "location": "query",
+      "type": "string"
+    },
+    "fields": {
+      "description": "Selector specifying which fields to include in a partial response.",
+      "location": "query",
+      "type": "string"
+    },
+    "key": {
+      "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+      "location": "query",
+      "type": "string"
+    },
+    "oauth_token": {
+      "description": "OAuth 2.0 token for the current user.",
+      "location": "query",
+      "type": "string"
+    },
+    "prettyPrint": {
+      "default": "true",
+      "description": "Returns response with indentations and line breaks.",
+      "location": "query",
+      "type": "boolean"
+    },
+    "quotaUser": {
+      "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
+      "location": "query",
+      "type": "string"
+    },
+    "uploadType": {
+      "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
+      "location": "query",
+      "type": "string"
+    },
+    "upload_protocol": {
+      "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
+      "location": "query",
+      "type": "string"
+    }
+  },
+  "protocol": "rest",
+  "resources": {
+    "apps": {
+      "methods": {
+        "get": {
+          "description": "Get the app, if it exists",
+          "flatPath": "v1alpha/apps/{mobilesdkAppId}",
+          "httpMethod": "GET",
+          "id": "firebaseappdistribution.apps.get",
+          "parameterOrder": [
+            "mobilesdkAppId"
+          ],
+          "parameters": {
+            "appView": {
+              "description": "App view. When unset or set to BASIC, returns an App with everything set except for aab_state. When set to FULL, returns an App with aab_state set.",
+              "enum": [
+                "APP_VIEW_UNSPECIFIED",
+                "BASIC",
+                "FULL"
+              ],
+              "enumDescriptions": [
+                "The default / unset value. The API will default to the BASIC view.",
+                "Include everything except aab_state.",
+                "Include everything."
+              ],
+              "location": "query",
+              "type": "string"
+            },
+            "mobilesdkAppId": {
+              "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            }
+          },
+          "path": "v1alpha/apps/{mobilesdkAppId}",
+          "response": {
+            "$ref": "GoogleFirebaseAppdistroV1alphaApp"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform"
+          ]
+        },
+        "getJwt": {
+          "description": "Get a JWT token",
+          "flatPath": "v1alpha/apps/{mobilesdkAppId}/jwt",
+          "httpMethod": "GET",
+          "id": "firebaseappdistribution.apps.getJwt",
+          "parameterOrder": [
+            "mobilesdkAppId"
+          ],
+          "parameters": {
+            "mobilesdkAppId": {
+              "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            }
+          },
+          "path": "v1alpha/apps/{mobilesdkAppId}/jwt",
+          "response": {
+            "$ref": "GoogleFirebaseAppdistroV1alphaJwt"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform"
+          ]
+        },
+        "provisionApp": {
+          "description": "Provision app distribution for an existing Firebase app, enabling it to subsequently be used by appdistro.",
+          "flatPath": "v1alpha/apps/{mobilesdkAppId}",
+          "httpMethod": "POST",
+          "id": "firebaseappdistribution.apps.provisionApp",
+          "parameterOrder": [
+            "mobilesdkAppId"
+          ],
+          "parameters": {
+            "mobilesdkAppId": {
+              "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            }
+          },
+          "path": "v1alpha/apps/{mobilesdkAppId}",
+          "response": {
+            "$ref": "GoogleFirebaseAppdistroV1alphaProvisionAppResponse"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform"
+          ]
+        }
+      },
+      "resources": {
+        "release_by_hash": {
+          "methods": {
+            "get": {
+              "description": "GET Release by binary upload hash",
+              "flatPath": "v1alpha/apps/{mobilesdkAppId}/release_by_hash/{uploadHash}",
+              "httpMethod": "GET",
+              "id": "firebaseappdistribution.apps.release_by_hash.get",
+              "parameterOrder": [
+                "mobilesdkAppId",
+                "uploadHash"
+              ],
+              "parameters": {
+                "mobilesdkAppId": {
+                  "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                },
+                "uploadHash": {
+                  "description": "The hash for the upload",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1alpha/apps/{mobilesdkAppId}/release_by_hash/{uploadHash}",
+              "response": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaGetReleaseByUploadHashResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          }
+        },
+        "releases": {
+          "methods": {
+            "enable_access": {
+              "description": "Enable access on a release for testers.",
+              "flatPath": "v1alpha/apps/{mobilesdkAppId}/releases/{releaseId}/enable_access",
+              "httpMethod": "POST",
+              "id": "firebaseappdistribution.apps.releases.enable_access",
+              "parameterOrder": [
+                "mobilesdkAppId",
+                "releaseId"
+              ],
+              "parameters": {
+                "mobilesdkAppId": {
+                  "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                },
+                "releaseId": {
+                  "description": "Release identifier",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1alpha/apps/{mobilesdkAppId}/releases/{releaseId}/enable_access",
+              "request": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaEnableAccessOnReleaseRequest"
+              },
+              "response": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaEnableAccessOnReleaseResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          },
+          "resources": {
+            "notes": {
+              "methods": {
+                "create": {
+                  "description": "Create release notes on a release.",
+                  "flatPath": "v1alpha/apps/{mobilesdkAppId}/releases/{releaseId}/notes",
+                  "httpMethod": "POST",
+                  "id": "firebaseappdistribution.apps.releases.notes.create",
+                  "parameterOrder": [
+                    "mobilesdkAppId",
+                    "releaseId"
+                  ],
+                  "parameters": {
+                    "mobilesdkAppId": {
+                      "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+                      "location": "path",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "releaseId": {
+                      "description": "Release identifier",
+                      "location": "path",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1alpha/apps/{mobilesdkAppId}/releases/{releaseId}/notes",
+                  "request": {
+                    "$ref": "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesRequest"
+                  },
+                  "response": {
+                    "$ref": "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
+            }
+          }
+        },
+        "testers": {
+          "methods": {
+            "getTesterUdids": {
+              "description": "Get UDIDs of tester iOS devices in a project",
+              "flatPath": "v1alpha/apps/{mobilesdkAppId}/testers:getTesterUdids",
+              "httpMethod": "GET",
+              "id": "firebaseappdistribution.apps.testers.getTesterUdids",
+              "parameterOrder": [
+                "mobilesdkAppId"
+              ],
+              "parameters": {
+                "mobilesdkAppId": {
+                  "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1alpha/apps/{mobilesdkAppId}/testers:getTesterUdids",
+              "response": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaGetTesterUdidsResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          }
+        },
+        "upload_status": {
+          "methods": {
+            "get": {
+              "description": "GET Binary upload status by token",
+              "flatPath": "v1alpha/apps/{mobilesdkAppId}/upload_status/{uploadToken}",
+              "httpMethod": "GET",
+              "id": "firebaseappdistribution.apps.upload_status.get",
+              "parameterOrder": [
+                "mobilesdkAppId",
+                "uploadToken"
+              ],
+              "parameters": {
+                "mobilesdkAppId": {
+                  "description": "Unique id for a Firebase app of the format: {version}:{project_number}:{platform}:{hash(bundle_id)} Example: 1:581234567376:android:aa0a3c7b135e90289",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                },
+                "uploadToken": {
+                  "description": "The token for the upload",
+                  "location": "path",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1alpha/apps/{mobilesdkAppId}/upload_status/{uploadToken}",
+              "response": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaGetUploadStatusResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          }
+        }
+      }
+    },
+    "projects": {
+      "resources": {
+        "apps": {
+          "methods": {
+            "getTestConfig": {
+              "description": "Gets configuration for automated tests.",
+              "flatPath": "v1alpha/projects/{projectsId}/apps/{appsId}/testConfig",
+              "httpMethod": "GET",
+              "id": "firebaseappdistribution.projects.apps.getTestConfig",
+              "parameterOrder": [
+                "name"
+              ],
+              "parameters": {
+                "name": {
+                  "description": "Required. The name of the `TestConfig` resource to retrieve. Format: `projects/{project_number}/apps/{app_id}/testConfig`",
+                  "location": "path",
+                  "pattern": "^projects/[^/]+/apps/[^/]+/testConfig$",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1alpha/{+name}",
+              "response": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaTestConfig"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            },
+            "updateTestConfig": {
+              "description": "Updates automated test configuration.",
+              "flatPath": "v1alpha/projects/{projectsId}/apps/{appsId}/testConfig",
+              "httpMethod": "PATCH",
+              "id": "firebaseappdistribution.projects.apps.updateTestConfig",
+              "parameterOrder": [
+                "name"
+              ],
+              "parameters": {
+                "name": {
+                  "description": "Identifier. The name of the test configuration resource. Format: `projects/{project_number}/apps/{app_id}/testConfig`",
+                  "location": "path",
+                  "pattern": "^projects/[^/]+/apps/[^/]+/testConfig$",
+                  "required": true,
+                  "type": "string"
+                },
+                "updateMask": {
+                  "description": "Optional. The list of fields to update.",
+                  "format": "google-fieldmask",
+                  "location": "query",
+                  "type": "string"
+                }
+              },
+              "path": "v1alpha/{+name}",
+              "request": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaTestConfig"
+              },
+              "response": {
+                "$ref": "GoogleFirebaseAppdistroV1alphaTestConfig"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          },
+          "resources": {
+            "releases": {
+              "resources": {
+                "tests": {
+                  "methods": {
+                    "create": {
+                      "description": "Run automated test(s) on release.",
+                      "flatPath": "v1alpha/projects/{projectsId}/apps/{appsId}/releases/{releasesId}/tests",
+                      "httpMethod": "POST",
+                      "id": "firebaseappdistribution.projects.apps.releases.tests.create",
+                      "parameterOrder": [
+                        "parent"
+                      ],
+                      "parameters": {
+                        "parent": {
+                          "description": "Required. The name of the release resource, which is the parent of the test. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}`",
+                          "location": "path",
+                          "pattern": "^projects/[^/]+/apps/[^/]+/releases/[^/]+$",
+                          "required": true,
+                          "type": "string"
+                        },
+                        "releaseTestId": {
+                          "description": "Optional. The ID to use for the test, which will become the final component of the test's resource name. This value should be 4-63 characters, and valid characters are /a-z-/. If it is not provided, one will be automatically generated.",
+                          "location": "query",
+                          "type": "string"
+                        }
+                      },
+                      "path": "v1alpha/{+parent}/tests",
+                      "request": {
+                        "$ref": "GoogleFirebaseAppdistroV1alphaReleaseTest"
+                      },
+                      "response": {
+                        "$ref": "GoogleFirebaseAppdistroV1alphaReleaseTest"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/cloud-platform"
+                      ]
+                    },
+                    "get": {
+                      "description": "Get results for automated test run on release.",
+                      "flatPath": "v1alpha/projects/{projectsId}/apps/{appsId}/releases/{releasesId}/tests/{testsId}",
+                      "httpMethod": "GET",
+                      "id": "firebaseappdistribution.projects.apps.releases.tests.get",
+                      "parameterOrder": [
+                        "name"
+                      ],
+                      "parameters": {
+                        "name": {
+                          "description": "Required. The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`",
+                          "location": "path",
+                          "pattern": "^projects/[^/]+/apps/[^/]+/releases/[^/]+/tests/[^/]+$",
+                          "required": true,
+                          "type": "string"
+                        }
+                      },
+                      "path": "v1alpha/{+name}",
+                      "response": {
+                        "$ref": "GoogleFirebaseAppdistroV1alphaReleaseTest"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/cloud-platform"
+                      ]
+                    },
+                    "list": {
+                      "description": "List results for automated tests run on release.",
+                      "flatPath": "v1alpha/projects/{projectsId}/apps/{appsId}/releases/{releasesId}/tests",
+                      "httpMethod": "GET",
+                      "id": "firebaseappdistribution.projects.apps.releases.tests.list",
+                      "parameterOrder": [
+                        "parent"
+                      ],
+                      "parameters": {
+                        "pageSize": {
+                          "description": "Optional. The maximum number of tests to return. The service may return fewer than this value.",
+                          "format": "int32",
+                          "location": "query",
+                          "type": "integer"
+                        },
+                        "pageToken": {
+                          "description": "Optional. A page token, received from a previous `ListReleaseTests` call. Provide this to retrieve the subsequent page.",
+                          "location": "query",
+                          "type": "string"
+                        },
+                        "parent": {
+                          "description": "Required. The name of the release resource, which is the parent of the tests. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}`",
+                          "location": "path",
+                          "pattern": "^projects/[^/]+/apps/[^/]+/releases/[^/]+$",
+                          "required": true,
+                          "type": "string"
+                        }
+                      },
+                      "path": "v1alpha/{+parent}/tests",
+                      "response": {
+                        "$ref": "GoogleFirebaseAppdistroV1alphaListReleaseTestsResponse"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/cloud-platform"
+                      ]
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  },
+  "revision": "20231211",
+  "rootUrl": "https://firebaseappdistribution.googleapis.com/",
+  "schemas": {
+    "GoogleFirebaseAppdistroV1Release": {
+      "description": "A release of a Firebase app.",
+      "id": "GoogleFirebaseAppdistroV1Release",
+      "properties": {
+        "binaryDownloadUri": {
+          "description": "Output only. A signed link (which expires in one hour) to directly download the app binary (IPA/APK/AAB) file.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "buildVersion": {
+          "description": "Output only. Build version of the release. For an Android release, the build version is the `versionCode`. For an iOS release, the build version is the `CFBundleVersion`.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "createTime": {
+          "description": "Output only. The time the release was created.",
+          "format": "google-datetime",
+          "readOnly": true,
+          "type": "string"
+        },
+        "displayVersion": {
+          "description": "Output only. Display version of the release. For an Android release, the display version is the `versionName`. For an iOS release, the display version is the `CFBundleShortVersionString`.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "firebaseConsoleUri": {
+          "description": "Output only. A link to the Firebase console displaying a single release.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "name": {
+          "description": "The name of the release resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}`",
+          "type": "string"
+        },
+        "releaseNotes": {
+          "$ref": "GoogleFirebaseAppdistroV1ReleaseNotes",
+          "description": "Notes of the release."
+        },
+        "testingUri": {
+          "description": "Output only. A link to the release in the tester web clip or Android app that lets testers (which were granted access to the app) view release notes and install the app onto their devices.",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1ReleaseNotes": {
+      "description": "Notes that belong to a release.",
+      "id": "GoogleFirebaseAppdistroV1ReleaseNotes",
+      "properties": {
+        "text": {
+          "description": "The text of the release notes.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1UploadReleaseMetadata": {
+      "description": "Operation metadata for `UploadRelease`.",
+      "id": "GoogleFirebaseAppdistroV1UploadReleaseMetadata",
+      "properties": {},
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1UploadReleaseResponse": {
+      "description": "Response message for `UploadRelease`.",
+      "id": "GoogleFirebaseAppdistroV1UploadReleaseResponse",
+      "properties": {
+        "release": {
+          "$ref": "GoogleFirebaseAppdistroV1Release",
+          "description": "Release associated with the uploaded binary."
+        },
+        "result": {
+          "description": "Result of upload release.",
+          "enum": [
+            "UPLOAD_RELEASE_RESULT_UNSPECIFIED",
+            "RELEASE_CREATED",
+            "RELEASE_UPDATED",
+            "RELEASE_UNMODIFIED"
+          ],
+          "enumDescriptions": [
+            "Upload binary result unspecified",
+            "Upload binary resulted in a new release",
+            "Upload binary updated an existing release",
+            "Upload binary resulted in a no-op. A release with the exact same binary already exists."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaAabCertificate": {
+      "description": "App bundle test certificate",
+      "id": "GoogleFirebaseAppdistroV1alphaAabCertificate",
+      "properties": {
+        "certificateHashMd5": {
+          "description": "MD5 hash of the certificate used to resign the AAB",
+          "type": "string"
+        },
+        "certificateHashSha1": {
+          "description": "SHA1 hash of the certificate used to resign the AAB",
+          "type": "string"
+        },
+        "certificateHashSha256": {
+          "description": "SHA256 hash of the certificate used to resign the AAB",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaApp": {
+      "id": "GoogleFirebaseAppdistroV1alphaApp",
+      "properties": {
+        "aabCertificate": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaAabCertificate",
+          "description": "App bundle test certificate generated for the app."
+        },
+        "aabState": {
+          "description": "App bundle state. Only valid for android apps. The app_view field in the request must be set to FULL in order for this to be populated.",
+          "enum": [
+            "AAB_STATE_UNSPECIFIED",
+            "ACTIVE",
+            "PLAY_ACCOUNT_NOT_LINKED",
+            "NO_APP_WITH_GIVEN_BUNDLE_ID_IN_PLAY_ACCOUNT",
+            "APP_NOT_PUBLISHED",
+            "AAB_STATE_UNAVAILABLE",
+            "PLAY_IAS_TERMS_NOT_ACCEPTED"
+          ],
+          "enumDescriptions": [
+            "Aab state unspecified",
+            "App can receive app bundle uploads",
+            "Firebase project is not linked to a Play developer account",
+            "There is no app in linked Play developer account with the same bundle id",
+            "The app in Play developer account is not in a published state",
+            "Play App status is unavailable",
+            "Play IAS terms not accepted"
+          ],
+          "type": "string"
+        },
+        "appId": {
+          "description": "Firebase gmp app id",
+          "type": "string"
+        },
+        "bundleId": {
+          "description": "Bundle identifier",
+          "type": "string"
+        },
+        "contactEmail": {
+          "description": "Developer contact email for testers to reach out to about privacy or support issues.",
+          "type": "string"
+        },
+        "platform": {
+          "description": "iOS or Android",
+          "type": "string"
+        },
+        "projectNumber": {
+          "description": "Project number of the Firebase project, for example 300830567303.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaAppCrash": {
+      "description": "An app crash that occurred during an automated test.",
+      "id": "GoogleFirebaseAppdistroV1alphaAppCrash",
+      "properties": {
+        "message": {
+          "description": "Output only. The message associated with the crash.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "stackTrace": {
+          "description": "Output only. The raw stack trace.",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesRequest": {
+      "id": "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesRequest",
+      "properties": {
+        "releaseNotes": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaReleaseNotes",
+          "description": "The actual release notes body from the user"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesResponse": {
+      "id": "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesResponse",
+      "properties": {},
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaDeviceExecution": {
+      "description": "The results of running an automated test on a particular device.",
+      "id": "GoogleFirebaseAppdistroV1alphaDeviceExecution",
+      "properties": {
+        "appCrash": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaAppCrash",
+          "description": "Output only. An app crash, if any occurred during the test.",
+          "readOnly": true
+        },
+        "crawlGraphUri": {
+          "description": "Output only. A URI to an image of the Robo crawl graph.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "device": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaTestDevice",
+          "description": "Required. The device that the test was run on."
+        },
+        "failedReason": {
+          "description": "Output only. The reason why the test failed.",
+          "enum": [
+            "FAILED_REASON_UNSPECIFIED",
+            "CRASHED",
+            "NOT_INSTALLED",
+            "UNABLE_TO_CRAWL",
+            "DEVICE_OUT_OF_MEMORY"
+          ],
+          "enumDescriptions": [
+            "Reason unspecified.",
+            "The app crashed during the test.",
+            "If an app is not installed and thus no test can be run with the app. This might be caused by trying to run a test on an unsupported platform.",
+            "If the app could not be crawled (possibly because the app did not start).",
+            "If the device ran out of memory during the test."
+          ],
+          "readOnly": true,
+          "type": "string"
+        },
+        "inconclusiveReason": {
+          "description": "Output only. The reason why the test was inconclusive.",
+          "enum": [
+            "INCONCLUSIVE_REASON_UNSPECIFIED",
+            "QUOTA_EXCEEDED",
+            "INFRASTRUCTURE_FAILURE",
+            "SERVICE_NOT_ACTIVATED",
+            "NO_SIGNATURE",
+            "NO_LAUNCHER_ACTIVITY",
+            "FORBIDDEN_PERMISSIONS",
+            "DEVICE_ADMIN_RECEIVER",
+            "NO_CODE_APK",
+            "INVALID_APK_PREVIEW_SDK"
+          ],
+          "enumDescriptions": [
+            "Reason unspecified.",
+            "Not enough quota remained to run the test.",
+            "The outcome of the test could not be determined because of a failure in the test running infrastructure.",
+            "A required cloud service api is not activated (Google Cloud Testing API or Cloud Tool Results API).",
+            "The app was not signed.",
+            "A main launcher activity could not be found.",
+            "The app declares one or more permissions that are not allowed.",
+            "Device administrator applications are not allowed.",
+            "APK contains no code. See also https://developer.android.com/guide/topics/manifest/application-element.html#code",
+            "APK is built for a preview SDK which is unsupported."
+          ],
+          "readOnly": true,
+          "type": "string"
+        },
+        "resultsStoragePath": {
+          "description": "Output only. The path to a directory in Cloud Storage that will eventually contain the results for this execution. For example, gs://bucket/Nexus5-18-en-portrait.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "roboStats": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaRoboStats",
+          "description": "Output only. The statistics collected during the Robo test.",
+          "readOnly": true
+        },
+        "screenshotUris": {
+          "description": "Output only. A list of screenshot image URIs taken from the Robo crawl. The file names are numbered by the order in which they were taken.",
+          "items": {
+            "type": "string"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
+        "state": {
+          "description": "Output only. The state of the test.",
+          "enum": [
+            "TEST_STATE_UNSPECIFIED",
+            "IN_PROGRESS",
+            "PASSED",
+            "FAILED",
+            "INCONCLUSIVE"
+          ],
+          "enumDescriptions": [
+            "Test state unspecified.",
+            "The test is in progress.",
+            "The test has passed.",
+            "The test has failed.",
+            "The test was inconclusive."
+          ],
+          "readOnly": true,
+          "type": "string"
+        },
+        "videoUri": {
+          "description": "Output only. A URI to a video of the test run.",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaEnableAccessOnReleaseRequest": {
+      "id": "GoogleFirebaseAppdistroV1alphaEnableAccessOnReleaseRequest",
+      "properties": {
+        "buildVersion": {
+          "deprecated": true,
+          "description": "Optional. Ignored. Used to be build version of the app release if an instance identifier was provided for the release_id.",
+          "type": "string"
+        },
+        "displayVersion": {
+          "deprecated": true,
+          "description": "Optional. Ignored. Used to be display version of the app release if an instance identifier was provided for the release_id.",
+          "type": "string"
+        },
+        "emails": {
+          "description": "Optional. Email addresses which should get access to this release, for example rebeccahe@google.com",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "groupIds": {
+          "description": "Optional. A repeated list of group aliases to enable access to a release for. Note: This field is misnamed, but can't be changed because we need to maintain compatibility with old build tools",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaEnableAccessOnReleaseResponse": {
+      "id": "GoogleFirebaseAppdistroV1alphaEnableAccessOnReleaseResponse",
+      "properties": {},
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaGetReleaseByUploadHashResponse": {
+      "description": "Response object to get the release given an upload hash",
+      "id": "GoogleFirebaseAppdistroV1alphaGetReleaseByUploadHashResponse",
+      "properties": {
+        "release": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaRelease",
+          "description": "Release object"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaGetTesterUdidsResponse": {
+      "description": "Response containing the UDIDs of tester iOS devices in a project",
+      "id": "GoogleFirebaseAppdistroV1alphaGetTesterUdidsResponse",
+      "properties": {
+        "testerUdids": {
+          "description": "The UDIDs of tester iOS devices in a project",
+          "items": {
+            "$ref": "GoogleFirebaseAppdistroV1alphaTesterUdid"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaGetUploadStatusResponse": {
+      "id": "GoogleFirebaseAppdistroV1alphaGetUploadStatusResponse",
+      "properties": {
+        "errorCode": {
+          "description": "The error code associated with the failure (only set on \"FAILURE\")",
+          "enum": [
+            "ERROR_UNSPECIFIED",
+            "INVALID_ZIP",
+            "MISSING_PLIST",
+            "MISSING_PROFILE",
+            "VERSION_TOO_LONG",
+            "MISSING_UUIDS",
+            "MISSING_RESOURCES",
+            "MISSING_MANIFEST",
+            "IOS_METADATA_ERROR",
+            "ANDROID_METADATA_ERROR",
+            "UNSUPPORTED_PLATFORM_TYPE",
+            "BUNDLE_ID_MISMATCH",
+            "APK_NOT_ZIP_ALIGNED",
+            "INVALID_CERTIFICATE",
+            "APK_TOO_LARGE",
+            "AAB_NOT_PUBLISHED",
+            "INVALID_PLIST_DEVICE_FAMILIES",
+            "AAB_TOS_NOT_ACCEPTED",
+            "APP_NAME_TOO_LONG",
+            "AAB_DEVELOPER_ACCOUNT_NOT_LINKED",
+            "AAB_NO_APP_WITH_GIVEN_PACKAGE_NAME_IN_ACCOUNT",
+            "AAB_UPLOAD_ERROR"
+          ],
+          "enumDescriptions": [
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            "",
+            ""
+          ],
+          "type": "string"
+        },
+        "message": {
+          "description": "Any additional context for the given upload status (e.g. error message). Meant to be displayed to the client",
+          "type": "string"
+        },
+        "release": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaRelease",
+          "description": "The release that was created from the upload (only set on \"SUCCESS\")"
+        },
+        "status": {
+          "description": "The status of the upload",
+          "enum": [
+            "STATUS_UNSPECIFIED",
+            "IN_PROGRESS",
+            "ALREADY_UPLOADED",
+            "SUCCESS",
+            "ERROR"
+          ],
+          "enumDescriptions": [
+            "",
+            "",
+            "",
+            "",
+            ""
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaJwt": {
+      "id": "GoogleFirebaseAppdistroV1alphaJwt",
+      "properties": {
+        "token": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaListReleaseTestsResponse": {
+      "description": "The response message for `ListReleaseTests`.",
+      "id": "GoogleFirebaseAppdistroV1alphaListReleaseTestsResponse",
+      "properties": {
+        "nextPageToken": {
+          "description": "A short-lived token, which can be sent as `pageToken` to retrieve the next page. If this field is omitted, there are no subsequent pages.",
+          "type": "string"
+        },
+        "releaseTests": {
+          "description": "The tests listed.",
+          "items": {
+            "$ref": "GoogleFirebaseAppdistroV1alphaReleaseTest"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaLoginCredential": {
+      "description": "Login credential for automated tests",
+      "id": "GoogleFirebaseAppdistroV1alphaLoginCredential",
+      "properties": {
+        "fieldHints": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaLoginCredentialFieldHints",
+          "description": "Optional. Hints to the crawler for identifying input fields"
+        },
+        "google": {
+          "description": "Optional. Are these credentials for Google?",
+          "type": "boolean"
+        },
+        "password": {
+          "description": "Optional. Password for automated tests",
+          "type": "string"
+        },
+        "username": {
+          "description": "Optional. Username for automated tests",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaLoginCredentialFieldHints": {
+      "description": "Hints to the crawler for identifying input fields",
+      "id": "GoogleFirebaseAppdistroV1alphaLoginCredentialFieldHints",
+      "properties": {
+        "passwordResourceName": {
+          "description": "Required. The Android resource name of the password UI element. For example, in Java: R.string.foo in xml: @string/foo Only the \"foo\" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html",
+          "type": "string"
+        },
+        "usernameResourceName": {
+          "description": "Required. The Android resource name of the username UI element. For example, in Java: R.string.foo in xml: @string/foo Only the \"foo\" part is needed. Reference doc: https://developer.android.com/guide/topics/resources/accessing-resources.html",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaProvisionAppResponse": {
+      "id": "GoogleFirebaseAppdistroV1alphaProvisionAppResponse",
+      "properties": {},
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaRelease": {
+      "description": "Proto defining a release object",
+      "id": "GoogleFirebaseAppdistroV1alphaRelease",
+      "properties": {
+        "buildVersion": {
+          "description": "Release build version",
+          "type": "string"
+        },
+        "displayVersion": {
+          "description": "Release version",
+          "type": "string"
+        },
+        "distributedAt": {
+          "description": "Timestamp when the release was created",
+          "format": "google-datetime",
+          "type": "string"
+        },
+        "id": {
+          "description": "Release Id",
+          "type": "string"
+        },
+        "instanceId": {
+          "description": "Instance id of the release",
+          "type": "string"
+        },
+        "lastActivityAt": {
+          "description": "Last activity timestamp",
+          "format": "google-datetime",
+          "type": "string"
+        },
+        "openInvitationCount": {
+          "description": "Number of testers who have open invitations for the release",
+          "format": "int32",
+          "type": "integer"
+        },
+        "receivedAt": {
+          "deprecated": true,
+          "description": "Unused.",
+          "format": "google-datetime",
+          "type": "string"
+        },
+        "releaseNotesSummary": {
+          "description": "Release notes summary",
+          "type": "string"
+        },
+        "testerCount": {
+          "description": "Count of testers added to the release",
+          "format": "int32",
+          "type": "integer"
+        },
+        "testerWithInstallCount": {
+          "description": "Number of testers who have installed the release",
+          "format": "int32",
+          "type": "integer"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaReleaseNotes": {
+      "id": "GoogleFirebaseAppdistroV1alphaReleaseNotes",
+      "properties": {
+        "releaseNotes": {
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaReleaseTest": {
+      "description": "The results of running an automated test on a release.",
+      "id": "GoogleFirebaseAppdistroV1alphaReleaseTest",
+      "properties": {
+        "createTime": {
+          "description": "Output only. Timestamp when the test was run.",
+          "format": "google-datetime",
+          "readOnly": true,
+          "type": "string"
+        },
+        "deviceExecutions": {
+          "description": "Required. The results of the test on each device.",
+          "items": {
+            "$ref": "GoogleFirebaseAppdistroV1alphaDeviceExecution"
+          },
+          "type": "array"
+        },
+        "loginCredential": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaLoginCredential",
+          "description": "Optional. Input only. Login credentials for the test. Input only."
+        },
+        "name": {
+          "description": "The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaRoboCrawler": {
+      "description": "Configuration for Robo crawler",
+      "id": "GoogleFirebaseAppdistroV1alphaRoboCrawler",
+      "properties": {
+        "loginCredential": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaLoginCredential",
+          "description": "Optional. Login credential for automated tests"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaRoboStats": {
+      "description": "Statistics collected during a Robo test.",
+      "id": "GoogleFirebaseAppdistroV1alphaRoboStats",
+      "properties": {
+        "actionsPerformed": {
+          "description": "Output only. Number of actions that crawler performed.",
+          "format": "int32",
+          "readOnly": true,
+          "type": "integer"
+        },
+        "crawlDuration": {
+          "description": "Output only. Duration of crawl.",
+          "format": "google-duration",
+          "readOnly": true,
+          "type": "string"
+        },
+        "distinctVisitedScreens": {
+          "description": "Output only. Number of distinct screens visited.",
+          "format": "int32",
+          "readOnly": true,
+          "type": "integer"
+        },
+        "mainActivityCrawlTimedOut": {
+          "description": "Output only. Whether the main activity crawl timed out.",
+          "readOnly": true,
+          "type": "boolean"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaTestConfig": {
+      "description": "Configuration for automated tests",
+      "id": "GoogleFirebaseAppdistroV1alphaTestConfig",
+      "properties": {
+        "name": {
+          "description": "Identifier. The name of the test configuration resource. Format: `projects/{project_number}/apps/{app_id}/testConfig`",
+          "type": "string"
+        },
+        "roboCrawler": {
+          "$ref": "GoogleFirebaseAppdistroV1alphaRoboCrawler",
+          "description": "Optional. Configuration for Robo crawler"
+        },
+        "testDevices": {
+          "description": "Optional. Tests will be run on this list of devices",
+          "items": {
+            "$ref": "GoogleFirebaseAppdistroV1alphaTestDevice"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaTestDevice": {
+      "description": "A device on which automated tests can be run.",
+      "id": "GoogleFirebaseAppdistroV1alphaTestDevice",
+      "properties": {
+        "locale": {
+          "description": "Optional. The locale of the device (e.g. \"en_US\" for US English) during the test.",
+          "type": "string"
+        },
+        "model": {
+          "description": "Required. The device model.",
+          "type": "string"
+        },
+        "orientation": {
+          "description": "Optional. The orientation of the device during the test.",
+          "type": "string"
+        },
+        "version": {
+          "description": "Required. The version of the device (API level on Android).",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleFirebaseAppdistroV1alphaTesterUdid": {
+      "description": "The UDIDs of a tester's iOS device",
+      "id": "GoogleFirebaseAppdistroV1alphaTesterUdid",
+      "properties": {
+        "name": {
+          "description": "The name of the tester's device",
+          "type": "string"
+        },
+        "platform": {
+          "description": "The platform of the tester's device",
+          "type": "string"
+        },
+        "udid": {
+          "description": "The UDID of the tester's device",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    }
+  },
+  "servicePath": "",
+  "title": "Firebase App Distribution API",
+  "version": "v1alpha",
+  "version_module": true
+}
\ No newline at end of file
diff --git a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json
index ac4f30d7b26..9bb73700ee6 100644
--- a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json
@@ -351,7 +351,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://firebasedatabase.googleapis.com/",
   "schemas": {
     "DatabaseInstance": {
diff --git a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json
index 3f65fe28e40..b366a6addf8 100644
--- a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json
+++ b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json
@@ -224,7 +224,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231212",
   "rootUrl": "https://firebasedynamiclinks.googleapis.com/",
   "schemas": {
     "AnalyticsInfo": {
diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json
index 0fb6730017d..f8d96994434 100644
--- a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json
+++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json
@@ -269,7 +269,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://firebasehosting.googleapis.com/",
   "schemas": {
     "CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json
index 24cd96cf330..4879a83bce5 100644
--- a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json
@@ -2422,7 +2422,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://firebasehosting.googleapis.com/",
   "schemas": {
     "ActingUser": {
diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1.json b/googleapiclient/discovery_cache/documents/firebaseml.v1.json
index 461c47c8347..81ef86a2541 100644
--- a/googleapiclient/discovery_cache/documents/firebaseml.v1.json
+++ b/googleapiclient/discovery_cache/documents/firebaseml.v1.json
@@ -204,7 +204,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231206",
   "rootUrl": "https://firebaseml.googleapis.com/",
   "schemas": {
     "CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json
index 2f3f5e3581f..b859e22bbf5 100644
--- a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json
@@ -318,7 +318,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231206",
   "rootUrl": "https://firebaseml.googleapis.com/",
   "schemas": {
     "DownloadModelResponse": {
diff --git a/googleapiclient/discovery_cache/documents/fitness.v1.json b/googleapiclient/discovery_cache/documents/fitness.v1.json
index 5bc5be12a94..423eee2248c 100644
--- a/googleapiclient/discovery_cache/documents/fitness.v1.json
+++ b/googleapiclient/discovery_cache/documents/fitness.v1.json
@@ -832,7 +832,7 @@
       }
     }
   },
-  "revision": "20231207",
+  "revision": "20231208",
   "rootUrl": "https://fitness.googleapis.com/",
   "schemas": {
     "AggregateBucket": {
diff --git a/googleapiclient/discovery_cache/documents/gkebackup.v1.json b/googleapiclient/discovery_cache/documents/gkebackup.v1.json
index 6372fa259a5..1b8243e6f53 100644
--- a/googleapiclient/discovery_cache/documents/gkebackup.v1.json
+++ b/googleapiclient/discovery_cache/documents/gkebackup.v1.json
@@ -1688,7 +1688,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231208",
   "rootUrl": "https://gkebackup.googleapis.com/",
   "schemas": {
     "AuditConfig": {
@@ -2705,7 +2705,7 @@
       "type": "object"
     },
     "RestoreConfig": {
-      "description": "Configuration of a restore. Next id: 13",
+      "description": "Configuration of a restore. Next id: 14",
       "id": "RestoreConfig",
       "properties": {
         "allNamespaces": {
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1.json b/googleapiclient/discovery_cache/documents/gkehub.v1.json
index 3e181d885a4..af4b28e3677 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1.json
@@ -1834,7 +1834,7 @@
       }
     }
   },
-  "revision": "20231114",
+  "revision": "20231201",
   "rootUrl": "https://gkehub.googleapis.com/",
   "schemas": {
     "AppDevExperienceFeatureSpec": {
@@ -2178,21 +2178,10 @@
       "description": "Per-membership state for this feature.",
       "id": "ClusterUpgradeMembershipState",
       "properties": {
-        "fleet": {
-          "description": "Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing.",
-          "type": "string"
-        },
         "ignored": {
           "$ref": "ClusterUpgradeIgnoredMembership",
           "description": "Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel."
         },
-        "scopes": {
-          "description": "Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled.",
-          "items": {
-            "type": "string"
-          },
-          "type": "array"
-        },
         "upgrades": {
           "description": "Actual upgrade state against desired.",
           "items": {
@@ -2341,7 +2330,7 @@
           "description": "Git repo configuration for the cluster."
         },
         "metricsGcpServiceAccountEmail": {
-          "description": "The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled.",
+          "description": "The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA.",
           "type": "string"
         },
         "oci": {
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json
index d382dcc7fcb..52d005ef3e3 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json
@@ -2098,7 +2098,7 @@
       }
     }
   },
-  "revision": "20231114",
+  "revision": "20231201",
   "rootUrl": "https://gkehub.googleapis.com/",
   "schemas": {
     "AnthosObservabilityFeatureSpec": {
@@ -2511,10 +2511,6 @@
       "description": "Per-membership state for this feature.",
       "id": "ClusterUpgradeMembershipState",
       "properties": {
-        "fleet": {
-          "description": "Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing.",
-          "type": "string"
-        },
         "ignored": {
           "$ref": "ClusterUpgradeIgnoredMembership",
           "description": "Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel."
@@ -2799,7 +2795,7 @@
           "description": "Git repo configuration for the cluster."
         },
         "metricsGcpServiceAccountEmail": {
-          "description": "The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled.",
+          "description": "The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA.",
           "type": "string"
         },
         "oci": {
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json
index 55d1f2a98af..df295800da5 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json
@@ -657,7 +657,7 @@
       }
     }
   },
-  "revision": "20231114",
+  "revision": "20231201",
   "rootUrl": "https://gkehub.googleapis.com/",
   "schemas": {
     "ApplianceCluster": {
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json
index a8e7a374909..1a08110ae48 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json
@@ -2024,7 +2024,7 @@
       }
     }
   },
-  "revision": "20231114",
+  "revision": "20231201",
   "rootUrl": "https://gkehub.googleapis.com/",
   "schemas": {
     "AnthosObservabilityFeatureSpec": {
@@ -2398,21 +2398,10 @@
       "description": "Per-membership state for this feature.",
       "id": "ClusterUpgradeMembershipState",
       "properties": {
-        "fleet": {
-          "description": "Project number or id of the fleet. It is set only for Memberships that are part of fleet-based Rollout Sequencing.",
-          "type": "string"
-        },
         "ignored": {
           "$ref": "ClusterUpgradeIgnoredMembership",
           "description": "Whether this membership is ignored by the feature. For example, manually upgraded clusters can be ignored if they are newer than the default versions of its release channel."
         },
-        "scopes": {
-          "description": "Fully qualified scope names that this clusters is bound to which also have rollout sequencing enabled.",
-          "items": {
-            "type": "string"
-          },
-          "type": "array"
-        },
         "upgrades": {
           "description": "Actual upgrade state against desired.",
           "items": {
@@ -2616,7 +2605,7 @@
           "description": "Git repo configuration for the cluster."
         },
         "metricsGcpServiceAccountEmail": {
-          "description": "The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. This field is required when automatic Feature management is enabled.",
+          "description": "The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA.",
           "type": "string"
         },
         "oci": {
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json
index 7ce81e1cd98..f5ce499f612 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json
@@ -712,7 +712,7 @@
       }
     }
   },
-  "revision": "20231114",
+  "revision": "20231201",
   "rootUrl": "https://gkehub.googleapis.com/",
   "schemas": {
     "ApplianceCluster": {
diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json
index 293dfcd7385..ece168452d8 100644
--- a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json
+++ b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json
@@ -280,7 +280,7 @@
       }
     }
   },
-  "revision": "20231114",
+  "revision": "20231201",
   "rootUrl": "https://gkehub.googleapis.com/",
   "schemas": {
     "CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/gkeonprem.v1.json b/googleapiclient/discovery_cache/documents/gkeonprem.v1.json
index 3c2bec5da7c..2c3d18dfd6d 100644
--- a/googleapiclient/discovery_cache/documents/gkeonprem.v1.json
+++ b/googleapiclient/discovery_cache/documents/gkeonprem.v1.json
@@ -2996,7 +2996,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231204",
   "rootUrl": "https://gkeonprem.googleapis.com/",
   "schemas": {
     "Authorization": {
@@ -5818,6 +5818,10 @@
         "resourcePool": {
           "description": "The name of the vCenter resource pool for the admin cluster.",
           "type": "string"
+        },
+        "storagePolicyName": {
+          "description": "The name of the vCenter storage policy for the admin cluster.",
+          "type": "string"
         }
       },
       "type": "object"
@@ -6327,7 +6331,8 @@
           "description": "Configuration settings for a static IP configuration."
         },
         "vcenterNetwork": {
-          "description": "vcenter_network specifies vCenter network name. Inherited from the admin cluster.",
+          "description": "Output only. vcenter_network specifies vCenter network name. Inherited from the admin cluster.",
+          "readOnly": true,
           "type": "string"
         }
       },
diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json
index eb989333ab0..aaade61bb0b 100644
--- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json
+++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json
@@ -265,7 +265,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://gmailpostmastertools.googleapis.com/",
   "schemas": {
     "DeliveryError": {
diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json
index 36a943795c7..3c380e62089 100644
--- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json
@@ -265,7 +265,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://gmailpostmastertools.googleapis.com/",
   "schemas": {
     "DeliveryError": {
diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1.json b/googleapiclient/discovery_cache/documents/healthcare.v1.json
index d517672534a..a2cfdd5d481 100644
--- a/googleapiclient/discovery_cache/documents/healthcare.v1.json
+++ b/googleapiclient/discovery_cache/documents/healthcare.v1.json
@@ -4431,7 +4431,7 @@
       }
     }
   },
-  "revision": "20231115",
+  "revision": "20231127",
   "rootUrl": "https://healthcare.googleapis.com/",
   "schemas": {
     "ActivateConsentRequest": {
diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json
index 568b01cb96c..a5fba36d815 100644
--- a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json
@@ -5451,7 +5451,7 @@
       }
     }
   },
-  "revision": "20231115",
+  "revision": "20231127",
   "rootUrl": "https://healthcare.googleapis.com/",
   "schemas": {
     "AccessDeterminationLogConfig": {
@@ -5469,7 +5469,7 @@
           "enumDescriptions": [
             "No log level specified. This value is unused.",
             "No additional consent-related logging is added to audit logs.",
-            "The following information is included: - One of the following [`consentMode`](https://cloud.google.com/healthcare-api/private/docs/how-tos/fhir-consent#audit_logs) fields: (`off`|`emptyScope`|`enforced`|`btg`|`bypass`). - The accessor's request headers - The `log_level` of the [AccessDeterminationLogConfig](google.cloud.healthcare.v1beta1.fhir.FhirStore.ConsentConfig.AccessDeterminationLogConfig) - The final consent evaluation (`PERMIT`, `DENY`, or `NO_CONSENT`) - A human-readable summary of the evaluation",
+            "The following information is included: - One of the following [`consentMode`](https://cloud.google.com/healthcare-api/docs/fhir-consent#audit_logs) fields: (`off`|`emptyScope`|`enforced`|`btg`|`bypass`). - The accessor's request headers - The `log_level` of the [AccessDeterminationLogConfig](google.cloud.healthcare.v1beta1.fhir.FhirStore.ConsentConfig.AccessDeterminationLogConfig) - The final consent evaluation (`PERMIT`, `DENY`, or `NO_CONSENT`) - A human-readable summary of the evaluation",
             "Includes `MINIMUM` and, for each resource owner, returns: - The resource owner's name - Most specific part of the `X-Consent-Scope` resulting in consensual determination - Timestamp of the applied enforcement leading to the decision - Enforcement version at the time the applicable consents were applied - The Consent resource name - The timestamp of the Consent resource used for enforcement - Policy type (PATIENT or ADMIN) Note that this mode adds some overhead to CRUD operations."
           ],
           "type": "string"
@@ -6282,7 +6282,7 @@
           "description": "Optional. Specifies how the server logs the consent-aware requests. If not specified, the `AccessDeterminationLogConfig.LogLevel.MINIMUM` option is used."
         },
         "accessEnforced": {
-          "description": "Optional. If set to true, when accessing FHIR resources, the consent headers provided using [SMART-on-FHIR](https://cloud.google.com/healthcare/private/docs/how-tos/smart-on-fhir) will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers.",
+          "description": "Optional. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers.",
           "type": "boolean"
         },
         "consentHeaderHandling": {
@@ -6304,7 +6304,7 @@
           ],
           "enumDescriptions": [
             "Users must specify an enforcement version or an error is returned.",
-            "Enforcement version 1. See the [FHIR Consent resources in the Cloud Healthcare API](https://cloud.google.com/healthcare-api/private/docs/how-tos/fhir-consent) guide for more details."
+            "Enforcement version 1. See the [FHIR Consent resources in the Cloud Healthcare API](https://cloud.google.com/healthcare-api/docs/fhir-consent) guide for more details."
           ],
           "type": "string"
         }
diff --git a/googleapiclient/discovery_cache/documents/iam.v2beta.json b/googleapiclient/discovery_cache/documents/iam.v2beta.json
index 5278c307adc..17513524a2b 100644
--- a/googleapiclient/discovery_cache/documents/iam.v2beta.json
+++ b/googleapiclient/discovery_cache/documents/iam.v2beta.json
@@ -293,7 +293,7 @@
       }
     }
   },
-  "revision": "20231026",
+  "revision": "20231130",
   "rootUrl": "https://iam.googleapis.com/",
   "schemas": {
     "GoogleCloudCommonOperationMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/iap.v1.json b/googleapiclient/discovery_cache/documents/iap.v1.json
index 3500a45540c..16ad42d0118 100644
--- a/googleapiclient/discovery_cache/documents/iap.v1.json
+++ b/googleapiclient/discovery_cache/documents/iap.v1.json
@@ -682,7 +682,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231208",
   "rootUrl": "https://iap.googleapis.com/",
   "schemas": {
     "AccessDeniedPageSettings": {
diff --git a/googleapiclient/discovery_cache/documents/iap.v1beta1.json b/googleapiclient/discovery_cache/documents/iap.v1beta1.json
index b5b9670299d..3ace4030378 100644
--- a/googleapiclient/discovery_cache/documents/iap.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/iap.v1beta1.json
@@ -194,7 +194,7 @@
       }
     }
   },
-  "revision": "20231201",
+  "revision": "20231204",
   "rootUrl": "https://iap.googleapis.com/",
   "schemas": {
     "Binding": {
diff --git a/googleapiclient/discovery_cache/documents/indexing.v3.json b/googleapiclient/discovery_cache/documents/indexing.v3.json
index 095c341b504..4a1b01c2bf8 100644
--- a/googleapiclient/discovery_cache/documents/indexing.v3.json
+++ b/googleapiclient/discovery_cache/documents/indexing.v3.json
@@ -149,7 +149,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231205",
   "rootUrl": "https://indexing.googleapis.com/",
   "schemas": {
     "PublishUrlNotificationResponse": {
diff --git a/googleapiclient/discovery_cache/documents/keep.v1.json b/googleapiclient/discovery_cache/documents/keep.v1.json
index 3fe4ae7f409..74ce7d465fa 100644
--- a/googleapiclient/discovery_cache/documents/keep.v1.json
+++ b/googleapiclient/discovery_cache/documents/keep.v1.json
@@ -314,7 +314,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231207",
   "rootUrl": "https://keep.googleapis.com/",
   "schemas": {
     "Attachment": {
diff --git a/googleapiclient/discovery_cache/documents/libraryagent.v1.json b/googleapiclient/discovery_cache/documents/libraryagent.v1.json
index 3ad1af63cf8..7fc8c6c6c44 100644
--- a/googleapiclient/discovery_cache/documents/libraryagent.v1.json
+++ b/googleapiclient/discovery_cache/documents/libraryagent.v1.json
@@ -279,7 +279,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://libraryagent.googleapis.com/",
   "schemas": {
     "GoogleExampleLibraryagentV1Book": {
diff --git a/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json b/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json
index 6726916b861..e94fa8f3937 100644
--- a/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json
+++ b/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json
@@ -312,7 +312,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231201",
   "rootUrl": "https://lifesciences.googleapis.com/",
   "schemas": {
     "Accelerator": {
diff --git a/googleapiclient/discovery_cache/documents/localservices.v1.json b/googleapiclient/discovery_cache/documents/localservices.v1.json
index 465d5ca48f9..1748c710b19 100644
--- a/googleapiclient/discovery_cache/documents/localservices.v1.json
+++ b/googleapiclient/discovery_cache/documents/localservices.v1.json
@@ -250,7 +250,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://localservices.googleapis.com/",
   "schemas": {
     "GoogleAdsHomeservicesLocalservicesV1AccountReport": {
diff --git a/googleapiclient/discovery_cache/documents/logging.v2.json b/googleapiclient/discovery_cache/documents/logging.v2.json
index 8101cfb462c..7dea307d375 100644
--- a/googleapiclient/discovery_cache/documents/logging.v2.json
+++ b/googleapiclient/discovery_cache/documents/logging.v2.json
@@ -127,7 +127,7 @@
     "billingAccounts": {
       "methods": {
         "getCmekSettings": {
-          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "flatPath": "v2/billingAccounts/{billingAccountsId}/cmekSettings",
           "httpMethod": "GET",
           "id": "logging.billingAccounts.getCmekSettings",
@@ -136,7 +136,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
               "location": "path",
               "pattern": "^billingAccounts/[^/]+$",
               "required": true,
@@ -155,7 +155,7 @@
           ]
         },
         "getSettings": {
-          "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.",
           "flatPath": "v2/billingAccounts/{billingAccountsId}/settings",
           "httpMethod": "GET",
           "id": "logging.billingAccounts.getSettings",
@@ -164,7 +164,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.",
               "location": "path",
               "pattern": "^billingAccounts/[^/]+$",
               "required": true,
@@ -1840,7 +1840,7 @@
     "folders": {
       "methods": {
         "getCmekSettings": {
-          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "flatPath": "v2/folders/{foldersId}/cmekSettings",
           "httpMethod": "GET",
           "id": "logging.folders.getCmekSettings",
@@ -1849,7 +1849,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
               "location": "path",
               "pattern": "^folders/[^/]+$",
               "required": true,
@@ -1868,7 +1868,7 @@
           ]
         },
         "getSettings": {
-          "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.",
           "flatPath": "v2/folders/{foldersId}/settings",
           "httpMethod": "GET",
           "id": "logging.folders.getSettings",
@@ -1877,7 +1877,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.",
               "location": "path",
               "pattern": "^folders/[^/]+$",
               "required": true,
@@ -1896,7 +1896,7 @@
           ]
         },
         "updateSettings": {
-          "description": "Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.",
           "flatPath": "v2/folders/{foldersId}/settings",
           "httpMethod": "PATCH",
           "id": "logging.folders.updateSettings",
@@ -1905,7 +1905,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"",
               "location": "path",
               "pattern": "^folders/[^/]+$",
               "required": true,
@@ -4197,7 +4197,7 @@
     "organizations": {
       "methods": {
         "getCmekSettings": {
-          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "flatPath": "v2/organizations/{organizationsId}/cmekSettings",
           "httpMethod": "GET",
           "id": "logging.organizations.getCmekSettings",
@@ -4206,7 +4206,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
               "location": "path",
               "pattern": "^organizations/[^/]+$",
               "required": true,
@@ -4225,7 +4225,7 @@
           ]
         },
         "getSettings": {
-          "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.",
           "flatPath": "v2/organizations/{organizationsId}/settings",
           "httpMethod": "GET",
           "id": "logging.organizations.getSettings",
@@ -4234,7 +4234,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.",
               "location": "path",
               "pattern": "^organizations/[^/]+$",
               "required": true,
@@ -4288,7 +4288,7 @@
           ]
         },
         "updateSettings": {
-          "description": "Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.",
           "flatPath": "v2/organizations/{organizationsId}/settings",
           "httpMethod": "PATCH",
           "id": "logging.organizations.updateSettings",
@@ -4297,7 +4297,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"",
               "location": "path",
               "pattern": "^organizations/[^/]+$",
               "required": true,
@@ -5734,7 +5734,7 @@
     "projects": {
       "methods": {
         "getCmekSettings": {
-          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "flatPath": "v2/projects/{projectsId}/cmekSettings",
           "httpMethod": "GET",
           "id": "logging.projects.getCmekSettings",
@@ -5743,7 +5743,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
               "location": "path",
               "pattern": "^projects/[^/]+$",
               "required": true,
@@ -5762,7 +5762,7 @@
           ]
         },
         "getSettings": {
-          "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.",
           "flatPath": "v2/projects/{projectsId}/settings",
           "httpMethod": "GET",
           "id": "logging.projects.getSettings",
@@ -5771,7 +5771,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.",
               "location": "path",
               "pattern": "^projects/[^/]+$",
               "required": true,
@@ -7540,7 +7540,7 @@
     "v2": {
       "methods": {
         "getCmekSettings": {
-          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "flatPath": "v2/{v2Id}/{v2Id1}/cmekSettings",
           "httpMethod": "GET",
           "id": "logging.getCmekSettings",
@@ -7549,7 +7549,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
               "location": "path",
               "pattern": "^[^/]+/[^/]+$",
               "required": true,
@@ -7568,7 +7568,7 @@
           ]
         },
         "getSettings": {
-          "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.",
           "flatPath": "v2/{v2Id}/{v2Id1}/settings",
           "httpMethod": "GET",
           "id": "logging.getSettings",
@@ -7577,7 +7577,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.",
               "location": "path",
               "pattern": "^[^/]+/[^/]+$",
               "required": true,
@@ -7631,7 +7631,7 @@
           ]
         },
         "updateSettings": {
-          "description": "Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings will fail if 1) kms_key_name is invalid, 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, 3) access to the key is disabled, 4) storage_location is not supported by Logging, 5) storage_location violates the location OrgPolicy, or 6) default_sink_config is set but has an unspecified filter write mode.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.",
           "flatPath": "v2/{v2Id}/{v2Id1}/settings",
           "httpMethod": "PATCH",
           "id": "logging.updateSettings",
@@ -7640,7 +7640,7 @@
           ],
           "parameters": {
             "name": {
-              "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.",
+              "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"",
               "location": "path",
               "pattern": "^[^/]+/[^/]+$",
               "required": true,
@@ -7668,7 +7668,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231130",
   "rootUrl": "https://logging.googleapis.com/",
   "schemas": {
     "BigQueryDataset": {
@@ -9664,7 +9664,7 @@
       "type": "object"
     },
     "Settings": {
-      "description": "Describes the settings associated with a project, folder, organization, billing account, or flexible resource.",
+      "description": "Describes the settings associated with a project, folder, organization, or billing account.",
       "id": "Settings",
       "properties": {
         "defaultSinkConfig": {
@@ -9676,11 +9676,11 @@
           "type": "boolean"
         },
         "kmsKeyName": {
-          "description": "Optional. The resource name for the configured Cloud KMS key.KMS key name format: \"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]\" For example:\"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key\"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Optional. The resource name for the configured Cloud KMS key.KMS key name format: \"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]\" For example:\"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key\"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "type": "string"
         },
         "kmsServiceAccountId": {
-          "description": "Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "readOnly": true,
           "type": "string"
         },
diff --git a/googleapiclient/discovery_cache/documents/manufacturers.v1.json b/googleapiclient/discovery_cache/documents/manufacturers.v1.json
index 426bf5d246b..7228c93a0c6 100644
--- a/googleapiclient/discovery_cache/documents/manufacturers.v1.json
+++ b/googleapiclient/discovery_cache/documents/manufacturers.v1.json
@@ -416,7 +416,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231211",
   "rootUrl": "https://manufacturers.googleapis.com/",
   "schemas": {
     "Attributes": {
diff --git a/googleapiclient/discovery_cache/documents/metastore.v1.json b/googleapiclient/discovery_cache/documents/metastore.v1.json
index 470f93619a6..9effee7fb30 100644
--- a/googleapiclient/discovery_cache/documents/metastore.v1.json
+++ b/googleapiclient/discovery_cache/documents/metastore.v1.json
@@ -599,6 +599,34 @@
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
+                "alterTableProperties": {
+                  "description": "Alter metadata table properties.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/services/{servicesId}:alterTableProperties",
+                  "httpMethod": "POST",
+                  "id": "metastore.projects.locations.services.alterTableProperties",
+                  "parameterOrder": [
+                    "service"
+                  ],
+                  "parameters": {
+                    "service": {
+                      "description": "Required. The relative resource name of the Dataproc Metastore service that's being used to mutate metadata table properties, in the following format:projects/{project_id}/locations/{location_id}/services/{service_id}.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/services/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+service}:alterTableProperties",
+                  "request": {
+                    "$ref": "AlterTablePropertiesRequest"
+                  },
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
                 "create": {
                   "description": "Creates a metastore service in a project and location.",
                   "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/services",
@@ -1339,7 +1367,7 @@
       }
     }
   },
-  "revision": "20231126",
+  "revision": "20231205",
   "rootUrl": "https://metastore.googleapis.com/",
   "schemas": {
     "AlterMetadataResourceLocationRequest": {
@@ -1363,6 +1391,29 @@
       "properties": {},
       "type": "object"
     },
+    "AlterTablePropertiesRequest": {
+      "description": "Request message for DataprocMetastore.AlterTableProperties.",
+      "id": "AlterTablePropertiesRequest",
+      "properties": {
+        "properties": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "A map that describes the desired values to mutate. If update_mask is empty, the properties will not update. Otherwise, the properties only alters the value whose associated paths exist in the update mask",
+          "type": "object"
+        },
+        "tableName": {
+          "description": "Required. The name of the table containing the properties you're altering in the following format.databases/{database_id}/tables/{table_id}",
+          "type": "string"
+        },
+        "updateMask": {
+          "description": "A field mask that specifies the metadata table properties that are overwritten by the update. Fields specified in the update_mask are relative to the resource (not to the full request). A field is overwritten if it is in the mask.For example, given the target properties: properties { a: 1 b: 2 } And an update properties: properties { a: 2 b: 3 c: 4 } then if the field mask is:paths: \"properties.b\", \"properties.c\"then the result will be: properties { a: 1 b: 3 c: 4 } ",
+          "format": "google-fieldmask",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "AuditConfig": {
       "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.If there are AuditConfigs for both allServices and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.",
       "id": "AuditConfig",
@@ -2426,10 +2477,6 @@
           "readOnly": true,
           "type": "string"
         },
-        "backupLocation": {
-          "description": "Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///.",
-          "type": "string"
-        },
         "details": {
           "description": "Output only. The restore details containing the revision of the service to be restored to, in format of JSON.",
           "readOnly": true,
diff --git a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json
index a2300ef27bf..7a9f47f4e38 100644
--- a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json
@@ -599,6 +599,34 @@
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
+                "alterTableProperties": {
+                  "description": "Alter metadata table properties.",
+                  "flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/services/{servicesId}:alterTableProperties",
+                  "httpMethod": "POST",
+                  "id": "metastore.projects.locations.services.alterTableProperties",
+                  "parameterOrder": [
+                    "service"
+                  ],
+                  "parameters": {
+                    "service": {
+                      "description": "Required. The relative resource name of the Dataproc Metastore service that's being used to mutate metadata table properties, in the following format:projects/{project_id}/locations/{location_id}/services/{service_id}.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/services/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1alpha/{+service}:alterTableProperties",
+                  "request": {
+                    "$ref": "AlterTablePropertiesRequest"
+                  },
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
                 "create": {
                   "description": "Creates a metastore service in a project and location.",
                   "flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/services",
@@ -1579,7 +1607,7 @@
       }
     }
   },
-  "revision": "20231126",
+  "revision": "20231205",
   "rootUrl": "https://metastore.googleapis.com/",
   "schemas": {
     "AlterMetadataResourceLocationRequest": {
@@ -1603,6 +1631,29 @@
       "properties": {},
       "type": "object"
     },
+    "AlterTablePropertiesRequest": {
+      "description": "Request message for DataprocMetastore.AlterTableProperties.",
+      "id": "AlterTablePropertiesRequest",
+      "properties": {
+        "properties": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "A map that describes the desired values to mutate. If update_mask is empty, the properties will not update. Otherwise, the properties only alters the value whose associated paths exist in the update mask",
+          "type": "object"
+        },
+        "tableName": {
+          "description": "Required. The name of the table containing the properties you're altering in the following format.databases/{database_id}/tables/{table_id}",
+          "type": "string"
+        },
+        "updateMask": {
+          "description": "A field mask that specifies the metadata table properties that are overwritten by the update. Fields specified in the update_mask are relative to the resource (not to the full request). A field is overwritten if it is in the mask.For example, given the target properties: properties { a: 1 b: 2 } And an update properties: properties { a: 2 b: 3 c: 4 } then if the field mask is:paths: \"properties.b\", \"properties.c\"then the result will be: properties { a: 1 b: 3 c: 4 } ",
+          "format": "google-fieldmask",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "AuditConfig": {
       "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.If there are AuditConfigs for both allServices and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.",
       "id": "AuditConfig",
@@ -2723,10 +2774,6 @@
           "readOnly": true,
           "type": "string"
         },
-        "backupLocation": {
-          "description": "Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///.",
-          "type": "string"
-        },
         "details": {
           "description": "Output only. The restore details containing the revision of the service to be restored to, in format of JSON.",
           "readOnly": true,
diff --git a/googleapiclient/discovery_cache/documents/metastore.v1beta.json b/googleapiclient/discovery_cache/documents/metastore.v1beta.json
index b07f97372c6..f2b99ab333d 100644
--- a/googleapiclient/discovery_cache/documents/metastore.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/metastore.v1beta.json
@@ -599,6 +599,34 @@
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
+                "alterTableProperties": {
+                  "description": "Alter metadata table properties.",
+                  "flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/services/{servicesId}:alterTableProperties",
+                  "httpMethod": "POST",
+                  "id": "metastore.projects.locations.services.alterTableProperties",
+                  "parameterOrder": [
+                    "service"
+                  ],
+                  "parameters": {
+                    "service": {
+                      "description": "Required. The relative resource name of the Dataproc Metastore service that's being used to mutate metadata table properties, in the following format:projects/{project_id}/locations/{location_id}/services/{service_id}.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/services/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1beta/{+service}:alterTableProperties",
+                  "request": {
+                    "$ref": "AlterTablePropertiesRequest"
+                  },
+                  "response": {
+                    "$ref": "Operation"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
                 "create": {
                   "description": "Creates a metastore service in a project and location.",
                   "flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/services",
@@ -1579,7 +1607,7 @@
       }
     }
   },
-  "revision": "20231126",
+  "revision": "20231205",
   "rootUrl": "https://metastore.googleapis.com/",
   "schemas": {
     "AlterMetadataResourceLocationRequest": {
@@ -1603,6 +1631,29 @@
       "properties": {},
       "type": "object"
     },
+    "AlterTablePropertiesRequest": {
+      "description": "Request message for DataprocMetastore.AlterTableProperties.",
+      "id": "AlterTablePropertiesRequest",
+      "properties": {
+        "properties": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "A map that describes the desired values to mutate. If update_mask is empty, the properties will not update. Otherwise, the properties only alters the value whose associated paths exist in the update mask",
+          "type": "object"
+        },
+        "tableName": {
+          "description": "Required. The name of the table containing the properties you're altering in the following format.databases/{database_id}/tables/{table_id}",
+          "type": "string"
+        },
+        "updateMask": {
+          "description": "A field mask that specifies the metadata table properties that are overwritten by the update. Fields specified in the update_mask are relative to the resource (not to the full request). A field is overwritten if it is in the mask.For example, given the target properties: properties { a: 1 b: 2 } And an update properties: properties { a: 2 b: 3 c: 4 } then if the field mask is:paths: \"properties.b\", \"properties.c\"then the result will be: properties { a: 1 b: 3 c: 4 } ",
+          "format": "google-fieldmask",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "AuditConfig": {
       "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.If there are AuditConfigs for both allServices and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.",
       "id": "AuditConfig",
@@ -2723,10 +2774,6 @@
           "readOnly": true,
           "type": "string"
         },
-        "backupLocation": {
-          "description": "Optional. A Cloud Storage URI specifying where the backup artifacts are stored, in the format gs:///.",
-          "type": "string"
-        },
         "details": {
           "description": "Output only. The restore details containing the revision of the service to be restored to, in format of JSON.",
           "readOnly": true,
diff --git a/googleapiclient/discovery_cache/documents/migrationcenter.v1.json b/googleapiclient/discovery_cache/documents/migrationcenter.v1.json
index a153cce79b4..950472a56cc 100644
--- a/googleapiclient/discovery_cache/documents/migrationcenter.v1.json
+++ b/googleapiclient/discovery_cache/documents/migrationcenter.v1.json
@@ -2099,7 +2099,7 @@
       }
     }
   },
-  "revision": "20231101",
+  "revision": "20231201",
   "rootUrl": "https://migrationcenter.googleapis.com/",
   "schemas": {
     "AddAssetsToGroupRequest": {
@@ -4794,7 +4794,7 @@
         },
         "storageBytesHistogram": {
           "$ref": "ReportSummaryHistogramChartData",
-          "description": "Histogram showing a distribution of memory sizes."
+          "description": "Histogram showing a distribution of storage sizes."
         },
         "storageUtilizationChart": {
           "$ref": "ReportSummaryUtilizationChartData",
diff --git a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json
index 66fe9e7778f..3a6ed04463f 100644
--- a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json
@@ -2107,7 +2107,7 @@
       }
     }
   },
-  "revision": "20231101",
+  "revision": "20231201",
   "rootUrl": "https://migrationcenter.googleapis.com/",
   "schemas": {
     "AddAssetsToGroupRequest": {
@@ -4813,7 +4813,7 @@
         },
         "storageBytesHistogram": {
           "$ref": "ReportSummaryHistogramChartData",
-          "description": "Histogram showing a distribution of memory sizes."
+          "description": "Histogram showing a distribution of storage sizes."
         },
         "storageUtilization": {
           "$ref": "ReportSummaryChartData",
diff --git a/googleapiclient/discovery_cache/documents/monitoring.v1.json b/googleapiclient/discovery_cache/documents/monitoring.v1.json
index e1a9eefb4bb..f1d3c9dcf77 100644
--- a/googleapiclient/discovery_cache/documents/monitoring.v1.json
+++ b/googleapiclient/discovery_cache/documents/monitoring.v1.json
@@ -753,7 +753,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231208",
   "rootUrl": "https://monitoring.googleapis.com/",
   "schemas": {
     "Aggregation": {
@@ -2029,6 +2029,21 @@
       },
       "type": "object"
     },
+    "SectionHeader": {
+      "description": "A widget that defines a new section header. Sections populate a table of contents and allow easier navigation of long-form content.",
+      "id": "SectionHeader",
+      "properties": {
+        "dividerBelow": {
+          "description": "Whether to insert a divider below the section in the table of contents.",
+          "type": "boolean"
+        },
+        "subtitle": {
+          "description": "The subtitle of the section.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "SourceContext": {
       "description": "SourceContext represents information about the source of a protobuf element, like the file in which it is defined.",
       "id": "SourceContext",
@@ -2626,6 +2641,10 @@
           "$ref": "Scorecard",
           "description": "A scorecard summarizing time series data."
         },
+        "sectionHeader": {
+          "$ref": "SectionHeader",
+          "description": "A widget that defines a section header for easier navigation of the dashboard."
+        },
         "text": {
           "$ref": "Text",
           "description": "A raw string or markdown displaying textual content."
diff --git a/googleapiclient/discovery_cache/documents/monitoring.v3.json b/googleapiclient/discovery_cache/documents/monitoring.v3.json
index 99b4d86e9ea..f17689cfdb6 100644
--- a/googleapiclient/discovery_cache/documents/monitoring.v3.json
+++ b/googleapiclient/discovery_cache/documents/monitoring.v3.json
@@ -2714,7 +2714,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231208",
   "rootUrl": "https://monitoring.googleapis.com/",
   "schemas": {
     "Aggregation": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json
index a1552e804f1..83013dd8272 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json
@@ -530,7 +530,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://mybusinessaccountmanagement.googleapis.com/",
   "schemas": {
     "AcceptInvitationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json
index fa76f7b5037..63b4a809f38 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json
@@ -612,7 +612,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://mybusinessbusinessinformation.googleapis.com/",
   "schemas": {
     "AdWordsLocationExtensions": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json
index 09314bcf1b9..b0b03c2235c 100644
--- a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json
@@ -194,7 +194,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://mybusinesslodging.googleapis.com/",
   "schemas": {
     "Accessibility": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json
index 8b5baa7ecec..b89300a1106 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json
@@ -154,7 +154,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://mybusinessnotifications.googleapis.com/",
   "schemas": {
     "NotificationSetting": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json
index b38cdcd1316..98a68a09863 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json
@@ -281,7 +281,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://mybusinessplaceactions.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json b/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json
index 27adcd726d2..42b9dcb2d6a 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json
@@ -323,7 +323,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://mybusinessqanda.googleapis.com/",
   "schemas": {
     "Answer": {
diff --git a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json
index a5b4b30ed63..34bae9c2fdd 100644
--- a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json
+++ b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json
@@ -237,7 +237,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://mybusinessverifications.googleapis.com/",
   "schemas": {
     "AddressVerificationData": {
diff --git a/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json b/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json
index c4273385383..49300e8437c 100644
--- a/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json
+++ b/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json
@@ -2630,7 +2630,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231129",
   "rootUrl": "https://networkconnectivity.googleapis.com/",
   "schemas": {
     "AcceptHubSpokeRequest": {
diff --git a/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json b/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json
index 4a8f47919cf..3d88a59bece 100644
--- a/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json
@@ -1116,7 +1116,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231129",
   "rootUrl": "https://networkconnectivity.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json
index 91929f105f9..4c90d4f5cb7 100644
--- a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json
@@ -591,7 +591,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://networkmanagement.googleapis.com/",
   "schemas": {
     "AbortInfo": {
@@ -997,12 +997,17 @@
             "NO_ROUTE",
             "ROUTE_BLACKHOLE",
             "ROUTE_WRONG_NETWORK",
+            "ROUTE_NEXT_HOP_IP_ADDRESS_NOT_RESOLVED",
+            "ROUTE_NEXT_HOP_RESOURCE_NOT_FOUND",
+            "NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV6_ADDRESS",
+            "VPN_TUNNEL_LOCAL_SELECTOR_MISMATCH",
+            "VPN_TUNNEL_REMOTE_SELECTOR_MISMATCH",
             "PRIVATE_TRAFFIC_TO_INTERNET",
             "PRIVATE_GOOGLE_ACCESS_DISALLOWED",
+            "PRIVATE_GOOGLE_ACCESS_VIA_VPN_TUNNEL_UNSUPPORTED",
             "NO_EXTERNAL_ADDRESS",
             "UNKNOWN_INTERNAL_ADDRESS",
             "FORWARDING_RULE_MISMATCH",
-            "FORWARDING_RULE_REGION_MISMATCH",
             "FORWARDING_RULE_NO_INSTANCES",
             "FIREWALL_BLOCKING_LOAD_BALANCER_BACKEND_HEALTH_CHECK",
             "INSTANCE_NOT_RUNNING",
@@ -1026,25 +1031,33 @@
             "CLOUD_FUNCTION_NOT_ACTIVE",
             "VPC_CONNECTOR_NOT_SET",
             "VPC_CONNECTOR_NOT_RUNNING",
+            "FORWARDING_RULE_REGION_MISMATCH",
             "PSC_CONNECTION_NOT_ACCEPTED",
+            "PSC_ENDPOINT_ACCESSED_FROM_PEERED_NETWORK",
             "CLOUD_RUN_REVISION_NOT_READY",
             "DROPPED_INSIDE_PSC_SERVICE_PRODUCER",
-            "LOAD_BALANCER_HAS_NO_PROXY_SUBNET"
+            "LOAD_BALANCER_HAS_NO_PROXY_SUBNET",
+            "CLOUD_NAT_NO_ADDRESSES"
           ],
           "enumDescriptions": [
             "Cause is unspecified.",
             "Destination external address cannot be resolved to a known target. If the address is used in a Google Cloud project, provide the project ID as test input.",
             "A Compute Engine instance can only send or receive a packet with a foreign IP address if ip_forward is enabled.",
             "Dropped due to a firewall rule, unless allowed due to connection tracking.",
-            "Dropped due to no routes.",
+            "Dropped due to no matching routes.",
             "Dropped due to invalid route. Route's next hop is a blackhole.",
             "Packet is sent to a wrong (unintended) network. Example: you trace a packet from VM1:Network1 to VM2:Network2, however, the route configured in Network1 sends the packet destined for VM2's IP address to Network3.",
+            "Route's next hop IP address cannot be resolved to a GCP resource.",
+            "Route's next hop resource is not found.",
+            "Packet is sent from the Internet to the private IPv6 address.",
+            "The packet does not match a policy-based VPN tunnel local selector.",
+            "The packet does not match a policy-based VPN tunnel remote selector.",
             "Packet with internal destination address sent to the internet gateway.",
-            "Instance with only an internal IP address tries to access Google API and services, but private Google access is not enabled.",
+            "Instance with only an internal IP address tries to access Google API and services, but private Google access is not enabled in the subnet.",
+            "Source endpoint tries to access Google API and services through the VPN tunnel to another network, but Private Google Access needs to be enabled in the source endpoint network.",
             "Instance with only an internal IP address tries to access external hosts, but Cloud NAT is not enabled in the subnet, unless special configurations on a VM allow this connection.",
             "Destination internal address cannot be resolved to a known target. If this is a shared VPC scenario, verify if the service project ID is provided as test input. Otherwise, verify if the IP address is being used in the project.",
             "Forwarding rule's protocol and ports do not match the packet header.",
-            "Packet could be dropped because it was sent from a different region to a regional forwarding without global access.",
             "Forwarding rule does not have backends configured.",
             "Firewalls block the health check probes to the backends and cause the backends to be unavailable for traffic from the load balancer. For more details, see [Health check firewall rules](https://cloud.google.com/load-balancing/docs/health-checks#firewall_rules).",
             "Packet is sent from or to a Compute Engine instance that is not in a running state.",
@@ -1068,16 +1081,31 @@
             "Packet could be dropped because the Cloud Function is not in an active status.",
             "Packet could be dropped because no VPC connector is set.",
             "Packet could be dropped because the VPC connector is not in a running state.",
+            "Packet could be dropped because it was sent from a different region to a regional forwarding without global access.",
             "The Private Service Connect endpoint is in a project that is not approved to connect to the service.",
+            "The packet is sent to the Private Service Connect endpoint over the peering, but [it's not supported](https://cloud.google.com/vpc/docs/configure-private-service-connect-services#on-premises).",
             "Packet sent from a Cloud Run revision that is not ready.",
             "Packet was dropped inside Private Service Connect service producer.",
-            "Packet sent to a load balancer, which requires a proxy-only subnet and the subnet is not found."
+            "Packet sent to a load balancer, which requires a proxy-only subnet and the subnet is not found.",
+            "Packet sent to Cloud NAT without active NAT IPs."
           ],
           "type": "string"
         },
+        "destinationIp": {
+          "description": "Destination IP address of the dropped packet (if relevant).",
+          "type": "string"
+        },
+        "region": {
+          "description": "Region of the dropped packet (if relevant).",
+          "type": "string"
+        },
         "resourceUri": {
           "description": "URI of the resource that caused the drop.",
           "type": "string"
+        },
+        "sourceIp": {
+          "description": "Source IP address of the dropped packet (if relevant).",
+          "type": "string"
         }
       },
       "type": "object"
@@ -1151,7 +1179,7 @@
           "type": "string"
         },
         "ipAddress": {
-          "description": "The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview).",
+          "description": "The IP address of the endpoint, which can be an external or internal IP.",
           "type": "string"
         },
         "loadBalancerId": {
@@ -1463,13 +1491,19 @@
             "GOOGLE_SERVICE_TYPE_UNSPECIFIED",
             "IAP",
             "GFE_PROXY_OR_HEALTH_CHECK_PROBER",
-            "CLOUD_DNS"
+            "CLOUD_DNS",
+            "GOOGLE_API",
+            "GOOGLE_API_PSC",
+            "GOOGLE_API_VPC_SC"
           ],
           "enumDescriptions": [
-            "Unspecified Google Service. Includes most of Google APIs and services.",
+            "Unspecified Google Service.",
             "Identity aware proxy. https://cloud.google.com/iap/docs/using-tcp-forwarding",
             "One of two services sharing IP ranges: * Load Balancer proxy * Centralized Health Check prober https://cloud.google.com/load-balancing/docs/firewall-rules",
-            "Connectivity from Cloud DNS to forwarding targets or alternate name servers that use private routing. https://cloud.google.com/dns/docs/zones/forwarding-zones#firewall-rules https://cloud.google.com/dns/docs/policies#firewall-rules"
+            "Connectivity from Cloud DNS to forwarding targets or alternate name servers that use private routing. https://cloud.google.com/dns/docs/zones/forwarding-zones#firewall-rules https://cloud.google.com/dns/docs/policies#firewall-rules",
+            "private.googleapis.com and restricted.googleapis.com",
+            "Google API via Private Service Connect. https://cloud.google.com/vpc/docs/configure-private-service-connect-apis",
+            "Google API via VPC Service Controls. https://cloud.google.com/vpc/docs/configure-private-service-connect-apis"
           ],
           "type": "string"
         },
@@ -1658,6 +1692,56 @@
       },
       "type": "object"
     },
+    "LoadBalancerBackendInfo": {
+      "description": "For display only. Metadata associated with the load balancer backend.",
+      "id": "LoadBalancerBackendInfo",
+      "properties": {
+        "backendDisplayName": {
+          "description": "Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends.",
+          "type": "string"
+        },
+        "backendServiceUri": {
+          "description": "URI of the backend service this backend belongs to (if applicable).",
+          "type": "string"
+        },
+        "healthCheckConfigState": {
+          "description": "Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules",
+          "enum": [
+            "HEALTH_CHECK_CONFIG_STATE_UNSPECIFIED",
+            "FIREWALLS_CONFIGURED",
+            "FIREWALLS_PARTIALLY_CONFIGURED",
+            "FIREWALLS_NOT_CONFIGURED",
+            "FIREWALLS_UNSUPPORTED"
+          ],
+          "enumDescriptions": [
+            "Configuration state unspecified. It usually means that the backend has no health check attached, or there was an unexpected configuration error preventing Connectivity tests from verifying health check configuration.",
+            "Firewall rules (policies) allowing health check traffic from all required IP ranges to the backend are configured.",
+            "Firewall rules (policies) allow health check traffic only from a part of required IP ranges.",
+            "Firewall rules (policies) deny health check traffic from all required IP ranges to the backend.",
+            "The network contains firewall rules of unsupported types, so Connectivity tests were not able to verify health check configuration status. Please refer to the documentation for the list of unsupported configurations: https://cloud.google.com/network-intelligence-center/docs/connectivity-tests/concepts/overview#unsupported-configs"
+          ],
+          "readOnly": true,
+          "type": "string"
+        },
+        "healthCheckUri": {
+          "description": "URI of the health check attached to this backend (if applicable).",
+          "type": "string"
+        },
+        "instanceGroupUri": {
+          "description": "URI of the instance group this backend belongs to (if applicable).",
+          "type": "string"
+        },
+        "instanceUri": {
+          "description": "URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends.",
+          "type": "string"
+        },
+        "networkEndpointGroupUri": {
+          "description": "URI of the network endpoint group this backend belongs to (if applicable).",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "LoadBalancerInfo": {
       "description": "For display only. Metadata associated with a load balancer.",
       "id": "LoadBalancerInfo",
@@ -1751,6 +1835,83 @@
       },
       "type": "object"
     },
+    "NatInfo": {
+      "description": "For display only. Metadata associated with NAT.",
+      "id": "NatInfo",
+      "properties": {
+        "natGatewayName": {
+          "description": "The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT.",
+          "type": "string"
+        },
+        "networkUri": {
+          "description": "URI of the network where NAT translation takes place.",
+          "type": "string"
+        },
+        "newDestinationIp": {
+          "description": "Destination IP address after NAT translation.",
+          "type": "string"
+        },
+        "newDestinationPort": {
+          "description": "Destination port after NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "newSourceIp": {
+          "description": "Source IP address after NAT translation.",
+          "type": "string"
+        },
+        "newSourcePort": {
+          "description": "Source port after NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldDestinationIp": {
+          "description": "Destination IP address before NAT translation.",
+          "type": "string"
+        },
+        "oldDestinationPort": {
+          "description": "Destination port before NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldSourceIp": {
+          "description": "Source IP address before NAT translation.",
+          "type": "string"
+        },
+        "oldSourcePort": {
+          "description": "Source port before NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "protocol": {
+          "description": "IP protocol in string format, for example: \"TCP\", \"UDP\", \"ICMP\".",
+          "type": "string"
+        },
+        "routerUri": {
+          "description": "URI of the Cloud Router. Only valid when type is CLOUD_NAT.",
+          "type": "string"
+        },
+        "type": {
+          "description": "Type of NAT.",
+          "enum": [
+            "TYPE_UNSPECIFIED",
+            "INTERNAL_TO_EXTERNAL",
+            "EXTERNAL_TO_INTERNAL",
+            "CLOUD_NAT",
+            "PRIVATE_SERVICE_CONNECT"
+          ],
+          "enumDescriptions": [
+            "Type is unspecified.",
+            "From Compute Engine instance's internal address to external address.",
+            "From Compute Engine instance's external address to internal address.",
+            "Cloud NAT Gateway.",
+            "Private service connect NAT."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "NetworkInfo": {
       "description": "For display only. Metadata associated with a Compute Engine network.",
       "id": "NetworkInfo",
@@ -1943,6 +2104,61 @@
       },
       "type": "object"
     },
+    "ProxyConnectionInfo": {
+      "description": "For display only. Metadata associated with ProxyConnection.",
+      "id": "ProxyConnectionInfo",
+      "properties": {
+        "networkUri": {
+          "description": "URI of the network where connection is proxied.",
+          "type": "string"
+        },
+        "newDestinationIp": {
+          "description": "Destination IP address of a new connection.",
+          "type": "string"
+        },
+        "newDestinationPort": {
+          "description": "Destination port of a new connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "newSourceIp": {
+          "description": "Source IP address of a new connection.",
+          "type": "string"
+        },
+        "newSourcePort": {
+          "description": "Source port of a new connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldDestinationIp": {
+          "description": "Destination IP address of an original connection.",
+          "type": "string"
+        },
+        "oldDestinationPort": {
+          "description": "Destination port of an original connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldSourceIp": {
+          "description": "Source IP address of an original connection.",
+          "type": "string"
+        },
+        "oldSourcePort": {
+          "description": "Source port of an original connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "protocol": {
+          "description": "IP protocol in string format, for example: \"TCP\", \"UDP\", \"ICMP\".",
+          "type": "string"
+        },
+        "subnetUri": {
+          "description": "URI of the proxy subnet.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "ReachabilityDetails": {
       "description": "Results of the configuration analysis from the last run of the test.",
       "id": "ReachabilityDetails",
@@ -2249,6 +2465,14 @@
           "$ref": "LoadBalancerInfo",
           "description": "Display information of the load balancers."
         },
+        "loadBalancerBackendInfo": {
+          "$ref": "LoadBalancerBackendInfo",
+          "description": "Display information of a specific load balancer backend."
+        },
+        "nat": {
+          "$ref": "NatInfo",
+          "description": "Display information of a NAT."
+        },
         "network": {
           "$ref": "NetworkInfo",
           "description": "Display information of a Google Cloud network."
@@ -2257,6 +2481,10 @@
           "description": "Project ID that contains the configuration this step is validating.",
           "type": "string"
         },
+        "proxyConnection": {
+          "$ref": "ProxyConnectionInfo",
+          "description": "Display information of a ProxyConnection."
+        },
         "route": {
           "$ref": "RouteInfo",
           "description": "Display information of a Compute Engine route."
@@ -2278,6 +2506,7 @@
             "APPLY_EGRESS_FIREWALL_RULE",
             "APPLY_ROUTE",
             "APPLY_FORWARDING_RULE",
+            "ANALYZE_LOAD_BALANCER_BACKEND",
             "SPOOFING_APPROVED",
             "ARRIVE_AT_INSTANCE",
             "ARRIVE_AT_INTERNAL_LOAD_BALANCER",
@@ -2297,7 +2526,7 @@
             "Unspecified state.",
             "Initial state: packet originating from a Compute Engine instance. An InstanceInfo is populated with starting instance information.",
             "Initial state: packet originating from the internet. The endpoint information is populated.",
-            "Initial state: packet originating from a Google service. Some Google services, such as health check probers or Identity Aware Proxy use special routes, outside VPC routing configuration to reach Compute Engine Instances.",
+            "Initial state: packet originating from a Google service. The google_service information is populated.",
             "Initial state: packet originating from a VPC or on-premises network with internal source IP. If the source is a VPC network visible to the user, a NetworkInfo is populated with details of the network.",
             "Initial state: packet originating from a Google Kubernetes Engine cluster master. A GKEMasterInfo is populated with starting instance information.",
             "Initial state: packet originating from a Cloud SQL instance. A CloudSQLInstanceInfo is populated with starting instance information.",
@@ -2308,6 +2537,7 @@
             "Config checking state: verify egress firewall rule.",
             "Config checking state: verify route.",
             "Config checking state: match forwarding rule.",
+            "Config checking state: verify load balancer backend configuration.",
             "Config checking state: packet sent or received under foreign IP address and allowed.",
             "Forwarding state: arriving at a Compute Engine instance.",
             "Forwarding state: arriving at a Compute Engine internal load balancer.",
diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json
index adbda53ec3f..096fbf65b2e 100644
--- a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json
@@ -591,7 +591,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://networkmanagement.googleapis.com/",
   "schemas": {
     "AbortInfo": {
@@ -1002,12 +1002,17 @@
             "NO_ROUTE",
             "ROUTE_BLACKHOLE",
             "ROUTE_WRONG_NETWORK",
+            "ROUTE_NEXT_HOP_IP_ADDRESS_NOT_RESOLVED",
+            "ROUTE_NEXT_HOP_RESOURCE_NOT_FOUND",
+            "NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV6_ADDRESS",
+            "VPN_TUNNEL_LOCAL_SELECTOR_MISMATCH",
+            "VPN_TUNNEL_REMOTE_SELECTOR_MISMATCH",
             "PRIVATE_TRAFFIC_TO_INTERNET",
             "PRIVATE_GOOGLE_ACCESS_DISALLOWED",
+            "PRIVATE_GOOGLE_ACCESS_VIA_VPN_TUNNEL_UNSUPPORTED",
             "NO_EXTERNAL_ADDRESS",
             "UNKNOWN_INTERNAL_ADDRESS",
             "FORWARDING_RULE_MISMATCH",
-            "FORWARDING_RULE_REGION_MISMATCH",
             "FORWARDING_RULE_NO_INSTANCES",
             "FIREWALL_BLOCKING_LOAD_BALANCER_BACKEND_HEALTH_CHECK",
             "INSTANCE_NOT_RUNNING",
@@ -1031,25 +1036,33 @@
             "CLOUD_FUNCTION_NOT_ACTIVE",
             "VPC_CONNECTOR_NOT_SET",
             "VPC_CONNECTOR_NOT_RUNNING",
+            "FORWARDING_RULE_REGION_MISMATCH",
             "PSC_CONNECTION_NOT_ACCEPTED",
+            "PSC_ENDPOINT_ACCESSED_FROM_PEERED_NETWORK",
             "CLOUD_RUN_REVISION_NOT_READY",
             "DROPPED_INSIDE_PSC_SERVICE_PRODUCER",
-            "LOAD_BALANCER_HAS_NO_PROXY_SUBNET"
+            "LOAD_BALANCER_HAS_NO_PROXY_SUBNET",
+            "CLOUD_NAT_NO_ADDRESSES"
           ],
           "enumDescriptions": [
             "Cause is unspecified.",
             "Destination external address cannot be resolved to a known target. If the address is used in a Google Cloud project, provide the project ID as test input.",
             "A Compute Engine instance can only send or receive a packet with a foreign IP address if ip_forward is enabled.",
             "Dropped due to a firewall rule, unless allowed due to connection tracking.",
-            "Dropped due to no routes.",
+            "Dropped due to no matching routes.",
             "Dropped due to invalid route. Route's next hop is a blackhole.",
             "Packet is sent to a wrong (unintended) network. Example: you trace a packet from VM1:Network1 to VM2:Network2, however, the route configured in Network1 sends the packet destined for VM2's IP address to Network3.",
+            "Route's next hop IP address cannot be resolved to a GCP resource.",
+            "Route's next hop resource is not found.",
+            "Packet is sent from the Internet to the private IPv6 address.",
+            "The packet does not match a policy-based VPN tunnel local selector.",
+            "The packet does not match a policy-based VPN tunnel remote selector.",
             "Packet with internal destination address sent to the internet gateway.",
-            "Instance with only an internal IP address tries to access Google API and services, but private Google access is not enabled.",
+            "Instance with only an internal IP address tries to access Google API and services, but private Google access is not enabled in the subnet.",
+            "Source endpoint tries to access Google API and services through the VPN tunnel to another network, but Private Google Access needs to be enabled in the source endpoint network.",
             "Instance with only an internal IP address tries to access external hosts, but Cloud NAT is not enabled in the subnet, unless special configurations on a VM allow this connection.",
             "Destination internal address cannot be resolved to a known target. If this is a shared VPC scenario, verify if the service project ID is provided as test input. Otherwise, verify if the IP address is being used in the project.",
             "Forwarding rule's protocol and ports do not match the packet header.",
-            "Packet could be dropped because it was sent from a different region to a regional forwarding without global access.",
             "Forwarding rule does not have backends configured.",
             "Firewalls block the health check probes to the backends and cause the backends to be unavailable for traffic from the load balancer. For more details, see [Health check firewall rules](https://cloud.google.com/load-balancing/docs/health-checks#firewall_rules).",
             "Packet is sent from or to a Compute Engine instance that is not in a running state.",
@@ -1073,16 +1086,31 @@
             "Packet could be dropped because the Cloud Function is not in an active status.",
             "Packet could be dropped because no VPC connector is set.",
             "Packet could be dropped because the VPC connector is not in a running state.",
+            "Packet could be dropped because it was sent from a different region to a regional forwarding without global access.",
             "The Private Service Connect endpoint is in a project that is not approved to connect to the service.",
+            "The packet is sent to the Private Service Connect endpoint over the peering, but [it's not supported](https://cloud.google.com/vpc/docs/configure-private-service-connect-services#on-premises).",
             "Packet sent from a Cloud Run revision that is not ready.",
             "Packet was dropped inside Private Service Connect service producer.",
-            "Packet sent to a load balancer, which requires a proxy-only subnet and the subnet is not found."
+            "Packet sent to a load balancer, which requires a proxy-only subnet and the subnet is not found.",
+            "Packet sent to Cloud Nat without active NAT IPs."
           ],
           "type": "string"
         },
+        "destinationIp": {
+          "description": "Destination IP address of the dropped packet (if relevant).",
+          "type": "string"
+        },
+        "region": {
+          "description": "Region of the dropped packet (if relevant).",
+          "type": "string"
+        },
         "resourceUri": {
           "description": "URI of the resource that caused the drop.",
           "type": "string"
+        },
+        "sourceIp": {
+          "description": "Source IP address of the dropped packet (if relevant).",
+          "type": "string"
         }
       },
       "type": "object"
@@ -1156,7 +1184,7 @@
           "type": "string"
         },
         "ipAddress": {
-          "description": "The IP address of the endpoint, which can be an external or internal IP. An IPv6 address is only allowed when the test's destination is a [global load balancer VIP](https://cloud.google.com/load-balancing/docs/load-balancing-overview).",
+          "description": "The IP address of the endpoint, which can be an external or internal IP.",
           "type": "string"
         },
         "loadBalancerId": {
@@ -1468,13 +1496,19 @@
             "GOOGLE_SERVICE_TYPE_UNSPECIFIED",
             "IAP",
             "GFE_PROXY_OR_HEALTH_CHECK_PROBER",
-            "CLOUD_DNS"
+            "CLOUD_DNS",
+            "GOOGLE_API",
+            "GOOGLE_API_PSC",
+            "GOOGLE_API_VPC_SC"
           ],
           "enumDescriptions": [
-            "Unspecified Google Service. Includes most of Google APIs and services.",
+            "Unspecified Google Service.",
             "Identity aware proxy. https://cloud.google.com/iap/docs/using-tcp-forwarding",
             "One of two services sharing IP ranges: * Load Balancer proxy * Centralized Health Check prober https://cloud.google.com/load-balancing/docs/firewall-rules",
-            "Connectivity from Cloud DNS to forwarding targets or alternate name servers that use private routing. https://cloud.google.com/dns/docs/zones/forwarding-zones#firewall-rules https://cloud.google.com/dns/docs/policies#firewall-rules"
+            "Connectivity from Cloud DNS to forwarding targets or alternate name servers that use private routing. https://cloud.google.com/dns/docs/zones/forwarding-zones#firewall-rules https://cloud.google.com/dns/docs/policies#firewall-rules",
+            "private.googleapis.com and restricted.googleapis.com",
+            "Google API via Private Service Connect. https://cloud.google.com/vpc/docs/configure-private-service-connect-apis",
+            "Google API via VPC Service Controls. https://cloud.google.com/vpc/docs/configure-private-service-connect-apis"
           ],
           "type": "string"
         },
@@ -1663,6 +1697,56 @@
       },
       "type": "object"
     },
+    "LoadBalancerBackendInfo": {
+      "description": "For display only. Metadata associated with the load balancer backend.",
+      "id": "LoadBalancerBackendInfo",
+      "properties": {
+        "backendDisplayName": {
+          "description": "Display name of the backend. For example, it might be an instance name for the instance group backends, or an IP address and port for zonal network endpoint group backends.",
+          "type": "string"
+        },
+        "backendServiceUri": {
+          "description": "URI of the backend service this backend belongs to (if applicable).",
+          "type": "string"
+        },
+        "healthCheckConfigState": {
+          "description": "Output only. Health check configuration state for the backend. This is a result of the static firewall analysis (verifying that health check traffic from required IP ranges to the backend is allowed or not). The backend might still be unhealthy even if these firewalls are configured. Please refer to the documentation for more information: https://cloud.google.com/load-balancing/docs/firewall-rules",
+          "enum": [
+            "HEALTH_CHECK_CONFIG_STATE_UNSPECIFIED",
+            "FIREWALLS_CONFIGURED",
+            "FIREWALLS_PARTIALLY_CONFIGURED",
+            "FIREWALLS_NOT_CONFIGURED",
+            "FIREWALLS_UNSUPPORTED"
+          ],
+          "enumDescriptions": [
+            "Configuration state unspecified. It usually means that the backend has no health check attached, or there was an unexpected configuration error preventing Connectivity tests from verifying health check configuration.",
+            "Firewall rules (policies) allowing health check traffic from all required IP ranges to the backend are configured.",
+            "Firewall rules (policies) allow health check traffic only from a part of required IP ranges.",
+            "Firewall rules (policies) deny health check traffic from all required IP ranges to the backend.",
+            "The network contains firewall rules of unsupported types, so Connectivity tests were not able to verify health check configuration status. Please refer to the documentation for the list of unsupported configurations: https://cloud.google.com/network-intelligence-center/docs/connectivity-tests/concepts/overview#unsupported-configs"
+          ],
+          "readOnly": true,
+          "type": "string"
+        },
+        "healthCheckUri": {
+          "description": "URI of the health check attached to this backend (if applicable).",
+          "type": "string"
+        },
+        "instanceGroupUri": {
+          "description": "URI of the instance group this backend belongs to (if applicable).",
+          "type": "string"
+        },
+        "instanceUri": {
+          "description": "URI of the backend instance (if applicable). Populated for instance group backends, and zonal NEG backends.",
+          "type": "string"
+        },
+        "networkEndpointGroupUri": {
+          "description": "URI of the network endpoint group this backend belongs to (if applicable).",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "LoadBalancerInfo": {
       "description": "For display only. Metadata associated with a load balancer.",
       "id": "LoadBalancerInfo",
@@ -1756,6 +1840,83 @@
       },
       "type": "object"
     },
+    "NatInfo": {
+      "description": "For display only. Metadata associated with NAT.",
+      "id": "NatInfo",
+      "properties": {
+        "natGatewayName": {
+          "description": "The name of Cloud NAT Gateway. Only valid when type is CLOUD_NAT.",
+          "type": "string"
+        },
+        "networkUri": {
+          "description": "URI of the network where NAT translation takes place.",
+          "type": "string"
+        },
+        "newDestinationIp": {
+          "description": "Destination IP address after NAT translation.",
+          "type": "string"
+        },
+        "newDestinationPort": {
+          "description": "Destination port after NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "newSourceIp": {
+          "description": "Source IP address after NAT translation.",
+          "type": "string"
+        },
+        "newSourcePort": {
+          "description": "Source port after NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldDestinationIp": {
+          "description": "Destination IP address before NAT translation.",
+          "type": "string"
+        },
+        "oldDestinationPort": {
+          "description": "Destination port before NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldSourceIp": {
+          "description": "Source IP address before NAT translation.",
+          "type": "string"
+        },
+        "oldSourcePort": {
+          "description": "Source port before NAT translation. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "protocol": {
+          "description": "IP protocol in string format, for example: \"TCP\", \"UDP\", \"ICMP\".",
+          "type": "string"
+        },
+        "routerUri": {
+          "description": "Uri of the Cloud Router. Only valid when type is CLOUD_NAT.",
+          "type": "string"
+        },
+        "type": {
+          "description": "Type of NAT.",
+          "enum": [
+            "TYPE_UNSPECIFIED",
+            "INTERNAL_TO_EXTERNAL",
+            "EXTERNAL_TO_INTERNAL",
+            "CLOUD_NAT",
+            "PRIVATE_SERVICE_CONNECT"
+          ],
+          "enumDescriptions": [
+            "Type is unspecified.",
+            "From Compute Engine instance's internal address to external address.",
+            "From Compute Engine instance's external address to internal address.",
+            "Cloud NAT Gateway.",
+            "Private service connect NAT."
+          ],
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "NetworkInfo": {
       "description": "For display only. Metadata associated with a Compute Engine network.",
       "id": "NetworkInfo",
@@ -1948,6 +2109,61 @@
       },
       "type": "object"
     },
+    "ProxyConnectionInfo": {
+      "description": "For display only. Metadata associated with ProxyConnection.",
+      "id": "ProxyConnectionInfo",
+      "properties": {
+        "networkUri": {
+          "description": "URI of the network where connection is proxied.",
+          "type": "string"
+        },
+        "newDestinationIp": {
+          "description": "Destination IP address of a new connection.",
+          "type": "string"
+        },
+        "newDestinationPort": {
+          "description": "Destination port of a new connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "newSourceIp": {
+          "description": "Source IP address of a new connection.",
+          "type": "string"
+        },
+        "newSourcePort": {
+          "description": "Source port of a new connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldDestinationIp": {
+          "description": "Destination IP address of an original connection",
+          "type": "string"
+        },
+        "oldDestinationPort": {
+          "description": "Destination port of an original connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "oldSourceIp": {
+          "description": "Source IP address of an original connection.",
+          "type": "string"
+        },
+        "oldSourcePort": {
+          "description": "Source port of an original connection. Only valid when protocol is TCP or UDP.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "protocol": {
+          "description": "IP protocol in string format, for example: \"TCP\", \"UDP\", \"ICMP\".",
+          "type": "string"
+        },
+        "subnetUri": {
+          "description": "Uri of proxy subnet.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "ReachabilityDetails": {
       "description": "Results of the configuration analysis from the last run of the test.",
       "id": "ReachabilityDetails",
@@ -2254,6 +2470,14 @@
           "$ref": "LoadBalancerInfo",
           "description": "Display information of the load balancers."
         },
+        "loadBalancerBackendInfo": {
+          "$ref": "LoadBalancerBackendInfo",
+          "description": "Display information of a specific load balancer backend."
+        },
+        "nat": {
+          "$ref": "NatInfo",
+          "description": "Display information of a NAT."
+        },
         "network": {
           "$ref": "NetworkInfo",
           "description": "Display information of a Google Cloud network."
@@ -2262,6 +2486,10 @@
           "description": "Project ID that contains the configuration this step is validating.",
           "type": "string"
         },
+        "proxyConnection": {
+          "$ref": "ProxyConnectionInfo",
+          "description": "Display information of a ProxyConnection."
+        },
         "route": {
           "$ref": "RouteInfo",
           "description": "Display information of a Compute Engine route."
@@ -2283,6 +2511,7 @@
             "APPLY_EGRESS_FIREWALL_RULE",
             "APPLY_ROUTE",
             "APPLY_FORWARDING_RULE",
+            "ANALYZE_LOAD_BALANCER_BACKEND",
             "SPOOFING_APPROVED",
             "ARRIVE_AT_INSTANCE",
             "ARRIVE_AT_INTERNAL_LOAD_BALANCER",
@@ -2302,7 +2531,7 @@
             "Unspecified state.",
             "Initial state: packet originating from a Compute Engine instance. An InstanceInfo is populated with starting instance information.",
             "Initial state: packet originating from the internet. The endpoint information is populated.",
-            "Initial state: packet originating from a Google service. Some Google services, such as health check probers or Identity Aware Proxy use special routes, outside VPC routing configuration to reach Compute Engine Instances.",
+            "Initial state: packet originating from a Google service. The google_service information is populated.",
             "Initial state: packet originating from a VPC or on-premises network with internal source IP. If the source is a VPC network visible to the user, a NetworkInfo is populated with details of the network.",
             "Initial state: packet originating from a Google Kubernetes Engine cluster master. A GKEMasterInfo is populated with starting instance information.",
             "Initial state: packet originating from a Cloud SQL instance. A CloudSQLInstanceInfo is populated with starting instance information.",
@@ -2313,6 +2542,7 @@
             "Config checking state: verify egress firewall rule.",
             "Config checking state: verify route.",
             "Config checking state: match forwarding rule.",
+            "Config checking state: verify load balancer backend configuration.",
             "Config checking state: packet sent or received under foreign IP address and allowed.",
             "Forwarding state: arriving at a Compute Engine instance.",
             "Forwarding state: arriving at a Compute Engine internal load balancer.",
diff --git a/googleapiclient/discovery_cache/documents/networksecurity.v1.json b/googleapiclient/discovery_cache/documents/networksecurity.v1.json
index b596578e5ec..eeddf4b5903 100644
--- a/googleapiclient/discovery_cache/documents/networksecurity.v1.json
+++ b/googleapiclient/discovery_cache/documents/networksecurity.v1.json
@@ -2474,7 +2474,7 @@
       }
     }
   },
-  "revision": "20231101",
+  "revision": "20231205",
   "rootUrl": "https://networksecurity.googleapis.com/",
   "schemas": {
     "AddAddressGroupItemsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json b/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json
index 928b5a7a1f2..3bde8ebdf80 100644
--- a/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json
@@ -552,7 +552,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Output only. name of resource",
+                      "description": "Immutable. Identifier. name of resource",
                       "location": "path",
                       "pattern": "^organizations/[^/]+/locations/[^/]+/firewallEndpoints/[^/]+$",
                       "required": true,
@@ -2124,7 +2124,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Output only. name of resource",
+                      "description": "Immutable. Identifier. name of resource",
                       "location": "path",
                       "pattern": "^projects/[^/]+/locations/[^/]+/firewallEndpointAssociations/[^/]+$",
                       "required": true,
@@ -3162,7 +3162,7 @@
       }
     }
   },
-  "revision": "20231101",
+  "revision": "20231205",
   "rootUrl": "https://networksecurity.googleapis.com/",
   "schemas": {
     "AddAddressGroupItemsRequest": {
@@ -3451,6 +3451,7 @@
       "id": "FirewallEndpoint",
       "properties": {
         "associatedNetworks": {
+          "deprecated": true,
           "description": "Output only. List of networks that are associated with this endpoint in the local zone. This is a projection of the FirewallEndpointAssociations pointing at this endpoint. A network will only appear in this list after traffic routing is fully configured. Format: projects/{project}/global/networks/{name}.",
           "items": {
             "type": "string"
@@ -3458,6 +3459,14 @@
           "readOnly": true,
           "type": "array"
         },
+        "associations": {
+          "description": "Output only. List of FirewallEndpointAssociations that are associated to this endpoint. An association will only appear in this list after traffic routing is fully configured.",
+          "items": {
+            "$ref": "FirewallEndpointAssociationReference"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
         "billingProjectId": {
           "description": "Optional. Project to bill on endpoint uptime usage.",
           "type": "string"
@@ -3480,8 +3489,7 @@
           "type": "object"
         },
         "name": {
-          "description": "Output only. name of resource",
-          "readOnly": true,
+          "description": "Immutable. Identifier. name of resource",
           "type": "string"
         },
         "reconciling": {
@@ -3539,8 +3547,7 @@
           "type": "object"
         },
         "name": {
-          "description": "Output only. name of resource",
-          "readOnly": true,
+          "description": "Immutable. Identifier. name of resource",
           "type": "string"
         },
         "network": {
@@ -3584,6 +3591,23 @@
       },
       "type": "object"
     },
+    "FirewallEndpointAssociationReference": {
+      "description": "This is a subset of the FirewallEndpointAssociation message, containing fields to be used by the consumer.",
+      "id": "FirewallEndpointAssociationReference",
+      "properties": {
+        "name": {
+          "description": "Output only. The resource name of the FirewallEndpointAssociation. Format: projects/{project}/locations/{location}/firewallEndpointAssociations/{id}",
+          "readOnly": true,
+          "type": "string"
+        },
+        "network": {
+          "description": "Output only. The VPC network associated. Format: projects/{project}/global/networks/{name}.",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "GatewaySecurityPolicy": {
       "description": "The GatewaySecurityPolicy resource contains a collection of GatewaySecurityPolicyRules and associated metadata.",
       "id": "GatewaySecurityPolicy",
diff --git a/googleapiclient/discovery_cache/documents/networkservices.v1.json b/googleapiclient/discovery_cache/documents/networkservices.v1.json
index 0f1549c640f..faee9d7d76f 100644
--- a/googleapiclient/discovery_cache/documents/networkservices.v1.json
+++ b/googleapiclient/discovery_cache/documents/networkservices.v1.json
@@ -2148,7 +2148,7 @@
       }
     }
   },
-  "revision": "20231025",
+  "revision": "20231129",
   "rootUrl": "https://networkservices.googleapis.com/",
   "schemas": {
     "AuditConfig": {
@@ -2379,7 +2379,7 @@
       "type": "object"
     },
     "Gateway": {
-      "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway.",
+      "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29",
       "id": "Gateway",
       "properties": {
         "addresses": {
@@ -2410,6 +2410,20 @@
           "description": "Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'.",
           "type": "string"
         },
+        "ipVersion": {
+          "description": "Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4.",
+          "enum": [
+            "IP_VERSION_UNSPECIFIED",
+            "IPV4",
+            "IPV6"
+          ],
+          "enumDescriptions": [
+            "The type when IP version is not specified. Defaults to IPV4.",
+            "The type for IP version 4.",
+            "The type for IP version 6."
+          ],
+          "type": "string"
+        },
         "labels": {
           "additionalProperties": {
             "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json b/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json
index a2fb2cc7e29..08cc1398d89 100644
--- a/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json
@@ -2483,7 +2483,7 @@
       }
     }
   },
-  "revision": "20231025",
+  "revision": "20231129",
   "rootUrl": "https://networkservices.googleapis.com/",
   "schemas": {
     "AuditConfig": {
@@ -2716,7 +2716,7 @@
           "type": "string"
         },
         "service": {
-          "description": "Required. The reference to the service that runs the extension. Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices).",
+          "description": "Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`.",
           "type": "string"
         },
         "supportedEvents": {
@@ -2753,14 +2753,14 @@
       "id": "ExtensionChainMatchCondition",
       "properties": {
         "celExpression": {
-          "description": "Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed.",
+          "description": "Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference).",
           "type": "string"
         }
       },
       "type": "object"
     },
     "Gateway": {
-      "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway.",
+      "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29",
       "id": "Gateway",
       "properties": {
         "addresses": {
@@ -2791,6 +2791,20 @@
           "description": "Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'.",
           "type": "string"
         },
+        "ipVersion": {
+          "description": "Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4.",
+          "enum": [
+            "IP_VERSION_UNSPECIFIED",
+            "IPV4",
+            "IPV6"
+          ],
+          "enumDescriptions": [
+            "The type when IP version is not specified. Defaults to IPV4.",
+            "The type for IP version 4.",
+            "The type for IP version 6."
+          ],
+          "type": "string"
+        },
         "labels": {
           "additionalProperties": {
             "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/notebooks.v1.json b/googleapiclient/discovery_cache/documents/notebooks.v1.json
index b7e6e376391..6c7b56b5d83 100644
--- a/googleapiclient/discovery_cache/documents/notebooks.v1.json
+++ b/googleapiclient/discovery_cache/documents/notebooks.v1.json
@@ -2008,7 +2008,7 @@
       }
     }
   },
-  "revision": "20231024",
+  "revision": "20231203",
   "rootUrl": "https://notebooks.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
diff --git a/googleapiclient/discovery_cache/documents/notebooks.v2.json b/googleapiclient/discovery_cache/documents/notebooks.v2.json
index 850a328872f..8602ec64755 100644
--- a/googleapiclient/discovery_cache/documents/notebooks.v2.json
+++ b/googleapiclient/discovery_cache/documents/notebooks.v2.json
@@ -848,7 +848,7 @@
       }
     }
   },
-  "revision": "20231024",
+  "revision": "20231203",
   "rootUrl": "https://notebooks.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -1426,6 +1426,11 @@
           "readOnly": true,
           "type": "string"
         },
+        "thirdPartyProxyUrl": {
+          "description": "Output only. The workforce pools proxy endpoint that is used to access the Jupyter notebook.",
+          "readOnly": true,
+          "type": "string"
+        },
         "updateTime": {
           "description": "Output only. Instance update time.",
           "format": "google-datetime",
diff --git a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json
index 0cdb575b5f3..927c35ef308 100644
--- a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json
+++ b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json
@@ -186,6 +186,11 @@
                 "name"
               ],
               "parameters": {
+                "etag": {
+                  "description": "Optional. The current etag of policy. If an etag is provided and does not match the current etag of the policy, deletion will be blocked and an ABORTED error will be returned.",
+                  "location": "query",
+                  "type": "string"
+                },
                 "name": {
                   "description": "Required. Name of the policy to delete. See the policy entry for naming rules.",
                   "location": "path",
@@ -553,6 +558,11 @@
                 "name"
               ],
               "parameters": {
+                "etag": {
+                  "description": "Optional. The current etag of policy. If an etag is provided and does not match the current etag of the policy, deletion will be blocked and an ABORTED error will be returned.",
+                  "location": "query",
+                  "type": "string"
+                },
                 "name": {
                   "description": "Required. Name of the policy to delete. See the policy entry for naming rules.",
                   "location": "path",
@@ -774,6 +784,11 @@
                 "name"
               ],
               "parameters": {
+                "etag": {
+                  "description": "Optional. The current etag of policy. If an etag is provided and does not match the current etag of the policy, deletion will be blocked and an ABORTED error will be returned.",
+                  "location": "query",
+                  "type": "string"
+                },
                 "name": {
                   "description": "Required. Name of the policy to delete. See the policy entry for naming rules.",
                   "location": "path",
@@ -915,7 +930,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://orgpolicy.googleapis.com/",
   "schemas": {
     "GoogleCloudOrgpolicyV2AlternatePolicySpec": {
@@ -1135,6 +1150,10 @@
           "$ref": "GoogleCloudOrgpolicyV2PolicySpec",
           "description": "Dry-run policy. Audit-only policy, can be used to monitor how the policy would have impacted the existing and future resources if it's enforced."
         },
+        "etag": {
+          "description": "Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.",
+          "type": "string"
+        },
         "name": {
           "description": "Immutable. The resource name of the policy. Must be one of the following forms, where `constraint_name` is the name of the constraint which this policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, `projects/123/policies/compute.disableSerialPortAccess`. Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number.",
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1.json b/googleapiclient/discovery_cache/documents/oslogin.v1.json
index 621d776e643..9eca207efa3 100644
--- a/googleapiclient/discovery_cache/documents/oslogin.v1.json
+++ b/googleapiclient/discovery_cache/documents/oslogin.v1.json
@@ -343,7 +343,7 @@
       }
     }
   },
-  "revision": "20231112",
+  "revision": "20231203",
   "rootUrl": "https://oslogin.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json
index 6079733446f..23e4eae1850 100644
--- a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json
@@ -471,7 +471,7 @@
       }
     }
   },
-  "revision": "20231112",
+  "revision": "20231203",
   "rootUrl": "https://oslogin.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json
index 02b85a7b1f5..698bf3da30b 100644
--- a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json
@@ -441,7 +441,7 @@
       }
     }
   },
-  "revision": "20231112",
+  "revision": "20231203",
   "rootUrl": "https://oslogin.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json
index 2495264849e..32e5fd88422 100644
--- a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json
+++ b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json
@@ -435,7 +435,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://paymentsresellersubscription.googleapis.com/",
   "schemas": {
     "GoogleCloudPaymentsResellerSubscriptionV1Amount": {
@@ -482,11 +482,11 @@
             "Buyer's remorse.",
             "Accidential purchase.",
             "Payment is past due.",
-            "User account closed.",
+            "Used for notification only, do not use in Cancel API. User account closed.",
             "Used for notification only, do not use in Cancel API. Cancellation due to upgrade or downgrade.",
             "Cancellation due to user delinquency",
-            "Cancellation due to an unrecoverable system error.",
-            "Cancellation by a system.",
+            "Used for notification only, do not use in Cancel API. Cancellation due to an unrecoverable system error.",
+            "Used for notification only, do not use in Cancel API. The subscription is cancelled by Google automatically since it is no longer valid.",
             "Other reason."
           ],
           "type": "string"
@@ -1176,11 +1176,11 @@
             "Buyer's remorse.",
             "Accidential purchase.",
             "Payment is past due.",
-            "User account closed.",
+            "Used for notification only, do not use in Cancel API. User account closed.",
             "Used for notification only, do not use in Cancel API. Cancellation due to upgrade or downgrade.",
             "Cancellation due to user delinquency",
-            "Cancellation due to an unrecoverable system error.",
-            "Cancellation by a system.",
+            "Used for notification only, do not use in Cancel API. Cancellation due to an unrecoverable system error.",
+            "Used for notification only, do not use in Cancel API. The subscription is cancelled by Google automatically since it is no longer valid.",
             "Other reason."
           ],
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/people.v1.json b/googleapiclient/discovery_cache/documents/people.v1.json
index feafca699f3..a12abd9c13c 100644
--- a/googleapiclient/discovery_cache/documents/people.v1.json
+++ b/googleapiclient/discovery_cache/documents/people.v1.json
@@ -1172,7 +1172,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://people.googleapis.com/",
   "schemas": {
     "Address": {
diff --git a/googleapiclient/discovery_cache/documents/places.v1.json b/googleapiclient/discovery_cache/documents/places.v1.json
index 6c8d6c4507b..56fa7bf9eb4 100644
--- a/googleapiclient/discovery_cache/documents/places.v1.json
+++ b/googleapiclient/discovery_cache/documents/places.v1.json
@@ -248,7 +248,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://places.googleapis.com/",
   "schemas": {
     "GoogleGeoTypeViewport": {
diff --git a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json
index a50438d36c7..3fe4ed7aa62 100644
--- a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json
+++ b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json
@@ -158,7 +158,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://playcustomapp.googleapis.com/",
   "schemas": {
     "CustomApp": {
diff --git a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json
index 30099fddc83..2ee76ba17f1 100644
--- a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json
@@ -941,7 +941,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://playdeveloperreporting.googleapis.com/",
   "schemas": {
     "GooglePlayDeveloperReportingV1alpha1Anomaly": {
@@ -1199,6 +1199,10 @@
       "description": "An error report received for an app. There reports are produced by the Android platform code when a (potentially fatal) error condition is detected. Identical reports from many users will be deduplicated and coalesced into a single ErrorReport. **Required permissions**: to access this resource, the calling user needs the _View app information (read-only)_ permission for the app.",
       "id": "GooglePlayDeveloperReportingV1alpha1ErrorReport",
       "properties": {
+        "appVersion": {
+          "$ref": "GooglePlayDeveloperReportingV1alpha1AppVersion",
+          "description": "The app version on which an event in this error report occurred on."
+        },
         "deviceModel": {
           "$ref": "GooglePlayDeveloperReportingV1alpha1DeviceModelSummary",
           "description": "A device model on which an event in this error report occurred on."
@@ -1237,6 +1241,10 @@
             "Crash caused by an unhandled exception in Java (or Kotlin or any other JVM language) or a signal in native code such as SIGSEGV."
           ],
           "type": "string"
+        },
+        "vcsInformation": {
+          "description": "Version control system information from BUNDLE-METADATA/version-control-info.textproto or META-INF/version-control-info.textproto of the app bundle or APK, respectively.",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json
index ac4eb01a46f..4e3d3b4f335 100644
--- a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json
@@ -941,7 +941,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://playdeveloperreporting.googleapis.com/",
   "schemas": {
     "GooglePlayDeveloperReportingV1beta1Anomaly": {
@@ -1199,6 +1199,10 @@
       "description": "An error report received for an app. There reports are produced by the Android platform code when a (potentially fatal) error condition is detected. Identical reports from many users will be deduplicated and coalesced into a single ErrorReport. **Required permissions**: to access this resource, the calling user needs the _View app information (read-only)_ permission for the app.",
       "id": "GooglePlayDeveloperReportingV1beta1ErrorReport",
       "properties": {
+        "appVersion": {
+          "$ref": "GooglePlayDeveloperReportingV1beta1AppVersion",
+          "description": "The app version on which an event in this error report occurred on."
+        },
         "deviceModel": {
           "$ref": "GooglePlayDeveloperReportingV1beta1DeviceModelSummary",
           "description": "A device model on which an event in this error report occurred on."
@@ -1237,6 +1241,10 @@
             "Crash caused by an unhandled exception in Java (or Kotlin or any other JVM language) or a signal in native code such as SIGSEGV."
           ],
           "type": "string"
+        },
+        "vcsInformation": {
+          "description": "Version control system information from BUNDLE-METADATA/version-control-info.textproto or META-INF/version-control-info.textproto of the app bundle or APK, respectively.",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json b/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json
index ae9da4e58da..d64ddafc3b1 100644
--- a/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json
@@ -177,7 +177,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://playgrouping.googleapis.com/",
   "schemas": {
     "CreateOrUpdateTagsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/playintegrity.v1.json b/googleapiclient/discovery_cache/documents/playintegrity.v1.json
index 64792fbb129..7214346ff88 100644
--- a/googleapiclient/discovery_cache/documents/playintegrity.v1.json
+++ b/googleapiclient/discovery_cache/documents/playintegrity.v1.json
@@ -138,7 +138,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://playintegrity.googleapis.com/",
   "schemas": {
     "AccountActivity": {
diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json
index d12986a3c2b..79e5b3a88fe 100644
--- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json
+++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json
@@ -163,7 +163,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://policyanalyzer.googleapis.com/",
   "schemas": {
     "GoogleCloudPolicyanalyzerV1Activity": {
diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json
index 85e58b0cd00..0cd922e1f9c 100644
--- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json
@@ -163,7 +163,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://policyanalyzer.googleapis.com/",
   "schemas": {
     "GoogleCloudPolicyanalyzerV1beta1Activity": {
diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json
index 1010cdb688e..eeffb016f6c 100644
--- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json
+++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json
@@ -128,7 +128,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://policytroubleshooter.googleapis.com/",
   "schemas": {
     "GoogleCloudPolicytroubleshooterV1AccessTuple": {
diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json
index 5e08ec54898..3d875625e7f 100644
--- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json
@@ -128,7 +128,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://policytroubleshooter.googleapis.com/",
   "schemas": {
     "GoogleCloudPolicytroubleshooterV1betaAccessTuple": {
diff --git a/googleapiclient/discovery_cache/documents/privateca.v1.json b/googleapiclient/discovery_cache/documents/privateca.v1.json
index a04a4f73db2..8012ed45a90 100644
--- a/googleapiclient/discovery_cache/documents/privateca.v1.json
+++ b/googleapiclient/discovery_cache/documents/privateca.v1.json
@@ -1605,7 +1605,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://privateca.googleapis.com/",
   "schemas": {
     "AccessUrls": {
@@ -2416,7 +2416,7 @@
       "id": "FetchCaCertsResponse",
       "properties": {
         "caCerts": {
-          "description": "The PEM encoded CA certificate chains of all ACTIVE CertificateAuthority resources in this CaPool.",
+          "description": "The PEM encoded CA certificate chains of all Certificate Authorities in this CaPool in the ENABLED, DISABLED, or STAGED states.",
           "items": {
             "$ref": "CertChain"
           },
diff --git a/googleapiclient/discovery_cache/documents/privateca.v1beta1.json b/googleapiclient/discovery_cache/documents/privateca.v1beta1.json
index 6959f1b5645..20b678dba4c 100644
--- a/googleapiclient/discovery_cache/documents/privateca.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/privateca.v1beta1.json
@@ -580,7 +580,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://privateca.googleapis.com/",
   "schemas": {
     "AuditConfig": {
diff --git a/googleapiclient/discovery_cache/documents/publicca.v1.json b/googleapiclient/discovery_cache/documents/publicca.v1.json
index df8533986a6..0fc0e4499e1 100644
--- a/googleapiclient/discovery_cache/documents/publicca.v1.json
+++ b/googleapiclient/discovery_cache/documents/publicca.v1.json
@@ -146,7 +146,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://publicca.googleapis.com/",
   "schemas": {
     "ExternalAccountKey": {
diff --git a/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json b/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json
index 18f2d8b1b68..fab22038128 100644
--- a/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json
@@ -146,7 +146,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://publicca.googleapis.com/",
   "schemas": {
     "ExternalAccountKey": {
diff --git a/googleapiclient/discovery_cache/documents/publicca.v1beta1.json b/googleapiclient/discovery_cache/documents/publicca.v1beta1.json
index 3d0b119a891..bdd04fa4de2 100644
--- a/googleapiclient/discovery_cache/documents/publicca.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/publicca.v1beta1.json
@@ -146,7 +146,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://publicca.googleapis.com/",
   "schemas": {
     "ExternalAccountKey": {
diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1.json b/googleapiclient/discovery_cache/documents/pubsub.v1.json
index c7b6ef7fd2a..7241ac915ef 100644
--- a/googleapiclient/discovery_cache/documents/pubsub.v1.json
+++ b/googleapiclient/discovery_cache/documents/pubsub.v1.json
@@ -1573,7 +1573,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231205",
   "rootUrl": "https://pubsub.googleapis.com/",
   "schemas": {
     "AcknowledgeRequest": {
@@ -1634,8 +1634,12 @@
           "description": "Optional. The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId}",
           "type": "string"
         },
+        "useTableSchema": {
+          "description": "Optional. When true, use the BigQuery table's schema as the columns to write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be enabled at the same time.",
+          "type": "boolean"
+        },
         "useTopicSchema": {
-          "description": "Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists.",
+          "description": "Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be enabled at the same time.",
           "type": "boolean"
         },
         "writeMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json
index 820dbfc5de9..0a066ca02e9 100644
--- a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json
+++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json
@@ -464,7 +464,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231205",
   "rootUrl": "https://pubsub.googleapis.com/",
   "schemas": {
     "AcknowledgeRequest": {
diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json
index 0a27127132d..da96803cfc8 100644
--- a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json
@@ -731,7 +731,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231205",
   "rootUrl": "https://pubsub.googleapis.com/",
   "schemas": {
     "AcknowledgeRequest": {
diff --git a/googleapiclient/discovery_cache/documents/pubsublite.v1.json b/googleapiclient/discovery_cache/documents/pubsublite.v1.json
index ddda3599412..858629b13ea 100644
--- a/googleapiclient/discovery_cache/documents/pubsublite.v1.json
+++ b/googleapiclient/discovery_cache/documents/pubsublite.v1.json
@@ -1040,7 +1040,7 @@
       }
     }
   },
-  "revision": "20231124",
+  "revision": "20231202",
   "rootUrl": "https://pubsublite.googleapis.com/",
   "schemas": {
     "CancelOperationRequest": {
diff --git a/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json b/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json
index bdfc6357ca7..dfc9161337c 100644
--- a/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json
+++ b/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json
@@ -207,7 +207,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://readerrevenuesubscriptionlinking.googleapis.com/",
   "schemas": {
     "DeleteReaderResponse": {
diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json
index 4bd288910ac..a2a77197797 100644
--- a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json
+++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json
@@ -1305,7 +1305,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://realtimebidding.googleapis.com/",
   "schemas": {
     "ActivatePretargetingConfigRequest": {
diff --git a/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json b/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json
index 36e0f340900..b42054313ac 100644
--- a/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json
@@ -841,7 +841,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231130",
   "rootUrl": "https://recommendationengine.googleapis.com/",
   "schemas": {
     "GoogleApiHttpBody": {
diff --git a/googleapiclient/discovery_cache/documents/redis.v1.json b/googleapiclient/discovery_cache/documents/redis.v1.json
index 6dab4b9c1a8..16fd1a227f0 100644
--- a/googleapiclient/discovery_cache/documents/redis.v1.json
+++ b/googleapiclient/discovery_cache/documents/redis.v1.json
@@ -821,7 +821,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231206",
   "rootUrl": "https://redis.googleapis.com/",
   "schemas": {
     "CertChain": {
diff --git a/googleapiclient/discovery_cache/documents/redis.v1beta1.json b/googleapiclient/discovery_cache/documents/redis.v1beta1.json
index 358bdc84f7c..36ccbe3569f 100644
--- a/googleapiclient/discovery_cache/documents/redis.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/redis.v1beta1.json
@@ -821,7 +821,7 @@
       }
     }
   },
-  "revision": "20231129",
+  "revision": "20231206",
   "rootUrl": "https://redis.googleapis.com/",
   "schemas": {
     "CertChain": {
diff --git a/googleapiclient/discovery_cache/documents/reseller.v1.json b/googleapiclient/discovery_cache/documents/reseller.v1.json
index 6311926fab4..48383bb28df 100644
--- a/googleapiclient/discovery_cache/documents/reseller.v1.json
+++ b/googleapiclient/discovery_cache/documents/reseller.v1.json
@@ -651,7 +651,7 @@
       }
     }
   },
-  "revision": "20231106",
+  "revision": "20231210",
   "rootUrl": "https://reseller.googleapis.com/",
   "schemas": {
     "Address": {
diff --git a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json
index 168c3e1e7a8..31ad27847b6 100644
--- a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json
+++ b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json
@@ -499,7 +499,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://resourcesettings.googleapis.com/",
   "schemas": {
     "GoogleCloudResourcesettingsV1ListSettingsResponse": {
diff --git a/googleapiclient/discovery_cache/documents/script.v1.json b/googleapiclient/discovery_cache/documents/script.v1.json
index d34b31f9995..7e28a86bc50 100644
--- a/googleapiclient/discovery_cache/documents/script.v1.json
+++ b/googleapiclient/discovery_cache/documents/script.v1.json
@@ -891,7 +891,7 @@
       }
     }
   },
-  "revision": "20231126",
+  "revision": "20231203",
   "rootUrl": "https://script.googleapis.com/",
   "schemas": {
     "Content": {
@@ -905,6 +905,10 @@
           },
           "type": "array"
         },
+        "revertFlumeInvoked": {
+          "description": "Set to true if called from revert flume to allow deletion of system generated manifest file while validating content request. This value is false by default.",
+          "type": "boolean"
+        },
         "scriptId": {
           "description": "The script project's Drive ID.",
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/searchconsole.v1.json b/googleapiclient/discovery_cache/documents/searchconsole.v1.json
index eab4c4bb5cb..a8f3e228ad1 100644
--- a/googleapiclient/discovery_cache/documents/searchconsole.v1.json
+++ b/googleapiclient/discovery_cache/documents/searchconsole.v1.json
@@ -400,7 +400,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://searchconsole.googleapis.com/",
   "schemas": {
     "AmpInspectionResult": {
diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json
index ddb85507a7f..ef128c26481 100644
--- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json
+++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json
@@ -402,6 +402,293 @@
             }
           }
         },
+        "eventThreatDetectionSettings": {
+          "methods": {
+            "validateCustomModule": {
+              "description": "Validates the given Event Threat Detection custom module.",
+              "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings:validateCustomModule",
+              "httpMethod": "POST",
+              "id": "securitycenter.folders.eventThreatDetectionSettings.validateCustomModule",
+              "parameterOrder": [
+                "parent"
+              ],
+              "parameters": {
+                "parent": {
+                  "description": "Required. Resource name of the parent to validate the Custom Module under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                  "location": "path",
+                  "pattern": "^folders/[^/]+/eventThreatDetectionSettings$",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1/{+parent}:validateCustomModule",
+              "request": {
+                "$ref": "ValidateEventThreatDetectionCustomModuleRequest"
+              },
+              "response": {
+                "$ref": "ValidateEventThreatDetectionCustomModuleResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          },
+          "resources": {
+            "customModules": {
+              "methods": {
+                "create": {
+                  "description": "Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/customModules",
+                  "httpMethod": "POST",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.customModules.create",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "parent": {
+                      "description": "Required. The new custom module's parent. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customModules",
+                  "request": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "response": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "delete": {
+                  "description": "Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/customModules/{customModulesId}",
+                  "httpMethod": "DELETE",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.customModules.delete",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. Name of the custom module to delete. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "Empty"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "get": {
+                  "description": "Gets an Event Threat Detection custom module.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/customModules/{customModulesId}",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.customModules.get",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. Name of the custom module to get. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "list": {
+                  "description": "Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/customModules",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.customModules.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customModules",
+                  "response": {
+                    "$ref": "ListEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "listDescendant": {
+                  "description": "Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/customModules:listDescendant",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.customModules.listDescendant",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListDescendantEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDescendantEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customModules:listDescendant",
+                  "response": {
+                    "$ref": "ListDescendantEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "patch": {
+                  "description": "Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of \"inherited\"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/customModules/{customModulesId}",
+                  "httpMethod": "PATCH",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.customModules.patch",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Immutable. The resource name of the Event Threat Detection custom module. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "updateMask": {
+                      "description": "The list of fields to be updated. If empty all mutable fields will be updated.",
+                      "format": "google-fieldmask",
+                      "location": "query",
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "request": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "response": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
+            },
+            "effectiveCustomModules": {
+              "methods": {
+                "get": {
+                  "description": "Gets an effective Event Threat Detection custom module at the given level.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/effectiveCustomModules/{effectiveCustomModulesId}",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.effectiveCustomModules.get",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. The resource name of the effective Event Threat Detection custom module. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings/effectiveCustomModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "EffectiveEventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "list": {
+                  "description": "Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.",
+                  "flatPath": "v1/folders/{foldersId}/eventThreatDetectionSettings/effectiveCustomModules",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.folders.eventThreatDetectionSettings.effectiveCustomModules.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListEffectiveEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEffectiveEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules for. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^folders/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/effectiveCustomModules",
+                  "response": {
+                    "$ref": "ListEffectiveEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
+            }
+          }
+        },
         "findings": {
           "methods": {
             "bulkMute": {
@@ -1011,7 +1298,7 @@
                       "type": "string"
                     },
                     "updateMask": {
-                      "description": "The list of fields to update.",
+                      "description": "The list of fields to be updated. The only fields that can be updated are `enablement_state` and `custom_config`. If empty or set to the wildcard value `*`, both `enablement_state` and `custom_config` are updated.",
                       "format": "google-fieldmask",
                       "location": "query",
                       "type": "string"
@@ -1834,7 +2121,7 @@
               ],
               "parameters": {
                 "parent": {
-                  "description": "Required. Resource name of the parent to validate the Custom Module under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\".",
+                  "description": "Required. Resource name of the parent to validate the Custom Module under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
                   "location": "path",
                   "pattern": "^organizations/[^/]+/eventThreatDetectionSettings$",
                   "required": true,
@@ -1857,7 +2144,7 @@
             "customModules": {
               "methods": {
                 "create": {
-                  "description": "Creates an Event Threat Detection custom module.",
+                  "description": "Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.",
                   "flatPath": "v1/organizations/{organizationsId}/eventThreatDetectionSettings/customModules",
                   "httpMethod": "POST",
                   "id": "securitycenter.organizations.eventThreatDetectionSettings.customModules.create",
@@ -1866,7 +2153,7 @@
                   ],
                   "parameters": {
                     "parent": {
-                      "description": "Required. The new custom module's parent. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\".",
+                      "description": "Required. The new custom module's parent. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
                       "location": "path",
                       "pattern": "^organizations/[^/]+/eventThreatDetectionSettings$",
                       "required": true,
@@ -1885,7 +2172,7 @@
                   ]
                 },
                 "delete": {
-                  "description": "Deletes an Event Threat Detection custom module.",
+                  "description": "Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.",
                   "flatPath": "v1/organizations/{organizationsId}/eventThreatDetectionSettings/customModules/{customModulesId}",
                   "httpMethod": "DELETE",
                   "id": "securitycenter.organizations.eventThreatDetectionSettings.customModules.delete",
@@ -1894,7 +2181,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Required. Name of the custom module to delete. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "description": "Required. Name of the custom module to delete. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
                       "location": "path",
                       "pattern": "^organizations/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
                       "required": true,
@@ -1919,7 +2206,7 @@
                   ],
                   "parameters": {
                     "name": {
-                      "description": "Required. Name of the custom module to get. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "description": "Required. Name of the custom module to get. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
                       "location": "path",
                       "pattern": "^organizations/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
                       "required": true,
@@ -1935,7 +2222,7 @@
                   ]
                 },
                 "list": {
-                  "description": "Lists Event Threat Detection custom modules.",
+                  "description": "Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.",
                   "flatPath": "v1/organizations/{organizationsId}/eventThreatDetectionSettings/customModules",
                   "httpMethod": "GET",
                   "id": "securitycenter.organizations.eventThreatDetectionSettings.customModules.list",
@@ -1955,7 +2242,7 @@
                       "type": "string"
                     },
                     "parent": {
-                      "description": "Required. Name of the parent to list custom modules under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\".",
+                      "description": "Required. Name of the parent to list custom modules under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
                       "location": "path",
                       "pattern": "^organizations/[^/]+/eventThreatDetectionSettings$",
                       "required": true,
@@ -1970,8 +2257,44 @@
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
+                "listDescendant": {
+                  "description": "Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.",
+                  "flatPath": "v1/organizations/{organizationsId}/eventThreatDetectionSettings/customModules:listDescendant",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.organizations.eventThreatDetectionSettings.customModules.listDescendant",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListDescendantEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDescendantEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^organizations/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customModules:listDescendant",
+                  "response": {
+                    "$ref": "ListDescendantEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
                 "patch": {
-                  "description": "Updates an Event Threat Detection custom module.",
+                  "description": "Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of \"inherited\"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.",
                   "flatPath": "v1/organizations/{organizationsId}/eventThreatDetectionSettings/customModules/{customModulesId}",
                   "httpMethod": "PATCH",
                   "id": "securitycenter.organizations.eventThreatDetectionSettings.customModules.patch",
@@ -2005,6 +2328,71 @@
                   ]
                 }
               }
+            },
+            "effectiveCustomModules": {
+              "methods": {
+                "get": {
+                  "description": "Gets an effective Event Threat Detection custom module at the given level.",
+                  "flatPath": "v1/organizations/{organizationsId}/eventThreatDetectionSettings/effectiveCustomModules/{effectiveCustomModulesId}",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.organizations.eventThreatDetectionSettings.effectiveCustomModules.get",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. The resource name of the effective Event Threat Detection custom module. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^organizations/[^/]+/eventThreatDetectionSettings/effectiveCustomModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "EffectiveEventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "list": {
+                  "description": "Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.",
+                  "flatPath": "v1/organizations/{organizationsId}/eventThreatDetectionSettings/effectiveCustomModules",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.organizations.eventThreatDetectionSettings.effectiveCustomModules.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListEffectiveEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEffectiveEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules for. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^organizations/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/effectiveCustomModules",
+                  "response": {
+                    "$ref": "ListEffectiveEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                }
+              }
             }
           }
         },
@@ -2889,7 +3277,7 @@
                       "type": "string"
                     },
                     "updateMask": {
-                      "description": "The list of fields to update.",
+                      "description": "The list of fields to be updated. The only fields that can be updated are `enablement_state` and `custom_config`. If empty or set to the wildcard value `*`, both `enablement_state` and `custom_config` are updated.",
                       "format": "google-fieldmask",
                       "location": "query",
                       "type": "string"
@@ -4044,60 +4432,347 @@
                   "location": "query",
                   "type": "integer"
                 },
-                "pageToken": {
-                  "description": "A page token, received from a previous `ListBigQueryExports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListBigQueryExports` must match the call that provided the page token.",
-                  "location": "query",
-                  "type": "string"
+                "pageToken": {
+                  "description": "A page token, received from a previous `ListBigQueryExports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListBigQueryExports` must match the call that provided the page token.",
+                  "location": "query",
+                  "type": "string"
+                },
+                "parent": {
+                  "description": "Required. The parent, which owns the collection of BigQuery exports. Its format is \"organizations/[organization_id]\", \"folders/[folder_id]\", \"projects/[project_id]\".",
+                  "location": "path",
+                  "pattern": "^projects/[^/]+$",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1/{+parent}/bigQueryExports",
+              "response": {
+                "$ref": "ListBigQueryExportsResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            },
+            "patch": {
+              "description": "Updates a BigQuery export.",
+              "flatPath": "v1/projects/{projectsId}/bigQueryExports/{bigQueryExportsId}",
+              "httpMethod": "PATCH",
+              "id": "securitycenter.projects.bigQueryExports.patch",
+              "parameterOrder": [
+                "name"
+              ],
+              "parameters": {
+                "name": {
+                  "description": "The relative resource name of this export. See: https://cloud.google.com/apis/design/resource_names#relative_resource_name. Example format: \"organizations/{organization_id}/bigQueryExports/{export_id}\" Example format: \"folders/{folder_id}/bigQueryExports/{export_id}\" Example format: \"projects/{project_id}/bigQueryExports/{export_id}\" This field is provided in responses, and is ignored when provided in create requests.",
+                  "location": "path",
+                  "pattern": "^projects/[^/]+/bigQueryExports/[^/]+$",
+                  "required": true,
+                  "type": "string"
+                },
+                "updateMask": {
+                  "description": "The list of fields to be updated. If empty all mutable fields will be updated.",
+                  "format": "google-fieldmask",
+                  "location": "query",
+                  "type": "string"
+                }
+              },
+              "path": "v1/{+name}",
+              "request": {
+                "$ref": "GoogleCloudSecuritycenterV1BigQueryExport"
+              },
+              "response": {
+                "$ref": "GoogleCloudSecuritycenterV1BigQueryExport"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          }
+        },
+        "eventThreatDetectionSettings": {
+          "methods": {
+            "validateCustomModule": {
+              "description": "Validates the given Event Threat Detection custom module.",
+              "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings:validateCustomModule",
+              "httpMethod": "POST",
+              "id": "securitycenter.projects.eventThreatDetectionSettings.validateCustomModule",
+              "parameterOrder": [
+                "parent"
+              ],
+              "parameters": {
+                "parent": {
+                  "description": "Required. Resource name of the parent to validate the Custom Module under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                  "location": "path",
+                  "pattern": "^projects/[^/]+/eventThreatDetectionSettings$",
+                  "required": true,
+                  "type": "string"
+                }
+              },
+              "path": "v1/{+parent}:validateCustomModule",
+              "request": {
+                "$ref": "ValidateEventThreatDetectionCustomModuleRequest"
+              },
+              "response": {
+                "$ref": "ValidateEventThreatDetectionCustomModuleResponse"
+              },
+              "scopes": [
+                "https://www.googleapis.com/auth/cloud-platform"
+              ]
+            }
+          },
+          "resources": {
+            "customModules": {
+              "methods": {
+                "create": {
+                  "description": "Creates a resident Event Threat Detection custom module at the scope of the given Resource Manager parent, and also creates inherited custom modules for all descendants of the given parent. These modules are enabled by default.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/customModules",
+                  "httpMethod": "POST",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.customModules.create",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "parent": {
+                      "description": "Required. The new custom module's parent. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customModules",
+                  "request": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "response": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "delete": {
+                  "description": "Deletes the specified Event Threat Detection custom module and all of its descendants in the Resource Manager hierarchy. This method is only supported for resident custom modules.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/customModules/{customModulesId}",
+                  "httpMethod": "DELETE",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.customModules.delete",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. Name of the custom module to delete. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "Empty"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "get": {
+                  "description": "Gets an Event Threat Detection custom module.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/customModules/{customModulesId}",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.customModules.get",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. Name of the custom module to get. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "list": {
+                  "description": "Lists all Event Threat Detection custom modules for the given Resource Manager parent. This includes resident modules defined at the scope of the parent along with modules inherited from ancestors.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/customModules",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.customModules.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customModules",
+                  "response": {
+                    "$ref": "ListEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
+                "listDescendant": {
+                  "description": "Lists all resident Event Threat Detection custom modules under the given Resource Manager parent and its descendants.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/customModules:listDescendant",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.customModules.listDescendant",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListDescendantEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDescendantEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules under. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/customModules:listDescendant",
+                  "response": {
+                    "$ref": "ListDescendantEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
                 },
-                "parent": {
-                  "description": "Required. The parent, which owns the collection of BigQuery exports. Its format is \"organizations/[organization_id]\", \"folders/[folder_id]\", \"projects/[project_id]\".",
-                  "location": "path",
-                  "pattern": "^projects/[^/]+$",
-                  "required": true,
-                  "type": "string"
+                "patch": {
+                  "description": "Updates the Event Threat Detection custom module with the given name based on the given update mask. Updating the enablement state is supported for both resident and inherited modules (though resident modules cannot have an enablement state of \"inherited\"). Updating the display name or configuration of a module is supported for resident modules only. The type of a module cannot be changed.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/customModules/{customModulesId}",
+                  "httpMethod": "PATCH",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.customModules.patch",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Immutable. The resource name of the Event Threat Detection custom module. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/customModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/customModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/customModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings/customModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    },
+                    "updateMask": {
+                      "description": "The list of fields to be updated. If empty all mutable fields will be updated.",
+                      "format": "google-fieldmask",
+                      "location": "query",
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "request": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "response": {
+                    "$ref": "EventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
                 }
-              },
-              "path": "v1/{+parent}/bigQueryExports",
-              "response": {
-                "$ref": "ListBigQueryExportsResponse"
-              },
-              "scopes": [
-                "https://www.googleapis.com/auth/cloud-platform"
-              ]
+              }
             },
-            "patch": {
-              "description": "Updates a BigQuery export.",
-              "flatPath": "v1/projects/{projectsId}/bigQueryExports/{bigQueryExportsId}",
-              "httpMethod": "PATCH",
-              "id": "securitycenter.projects.bigQueryExports.patch",
-              "parameterOrder": [
-                "name"
-              ],
-              "parameters": {
-                "name": {
-                  "description": "The relative resource name of this export. See: https://cloud.google.com/apis/design/resource_names#relative_resource_name. Example format: \"organizations/{organization_id}/bigQueryExports/{export_id}\" Example format: \"folders/{folder_id}/bigQueryExports/{export_id}\" Example format: \"projects/{project_id}/bigQueryExports/{export_id}\" This field is provided in responses, and is ignored when provided in create requests.",
-                  "location": "path",
-                  "pattern": "^projects/[^/]+/bigQueryExports/[^/]+$",
-                  "required": true,
-                  "type": "string"
+            "effectiveCustomModules": {
+              "methods": {
+                "get": {
+                  "description": "Gets an effective Event Threat Detection custom module at the given level.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/effectiveCustomModules/{effectiveCustomModulesId}",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.effectiveCustomModules.get",
+                  "parameterOrder": [
+                    "name"
+                  ],
+                  "parameters": {
+                    "name": {
+                      "description": "Required. The resource name of the effective Event Threat Detection custom module. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings/effectiveCustomModules/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+name}",
+                  "response": {
+                    "$ref": "EffectiveEventThreatDetectionCustomModule"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
                 },
-                "updateMask": {
-                  "description": "The list of fields to be updated. If empty all mutable fields will be updated.",
-                  "format": "google-fieldmask",
-                  "location": "query",
-                  "type": "string"
+                "list": {
+                  "description": "Lists all effective Event Threat Detection custom modules for the given parent. This includes resident modules defined at the scope of the parent along with modules inherited from its ancestors.",
+                  "flatPath": "v1/projects/{projectsId}/eventThreatDetectionSettings/effectiveCustomModules",
+                  "httpMethod": "GET",
+                  "id": "securitycenter.projects.eventThreatDetectionSettings.effectiveCustomModules.list",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "pageSize": {
+                      "description": "The maximum number of modules to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.",
+                      "format": "int32",
+                      "location": "query",
+                      "type": "integer"
+                    },
+                    "pageToken": {
+                      "description": "A page token, received from a previous `ListEffectiveEventThreatDetectionCustomModules` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListEffectiveEventThreatDetectionCustomModules` must match the call that provided the page token.",
+                      "location": "query",
+                      "type": "string"
+                    },
+                    "parent": {
+                      "description": "Required. Name of the parent to list custom modules for. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings\". * \"folders/{folder}/eventThreatDetectionSettings\". * \"projects/{project}/eventThreatDetectionSettings\".",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/eventThreatDetectionSettings$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/effectiveCustomModules",
+                  "response": {
+                    "$ref": "ListEffectiveEventThreatDetectionCustomModulesResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
                 }
-              },
-              "path": "v1/{+name}",
-              "request": {
-                "$ref": "GoogleCloudSecuritycenterV1BigQueryExport"
-              },
-              "response": {
-                "$ref": "GoogleCloudSecuritycenterV1BigQueryExport"
-              },
-              "scopes": [
-                "https://www.googleapis.com/auth/cloud-platform"
-              ]
+              }
             }
           }
         },
@@ -4710,7 +5385,7 @@
                       "type": "string"
                     },
                     "updateMask": {
-                      "description": "The list of fields to update.",
+                      "description": "The list of fields to be updated. The only fields that can be updated are `enablement_state` and `custom_config`. If empty or set to the wildcard value `*`, both `enablement_state` and `custom_config` are updated.",
                       "format": "google-fieldmask",
                       "location": "query",
                       "type": "string"
@@ -5135,7 +5810,7 @@
       }
     }
   },
-  "revision": "20231130",
+  "revision": "20231208",
   "rootUrl": "https://securitycenter.googleapis.com/",
   "schemas": {
     "Access": {
@@ -6163,6 +6838,57 @@
       },
       "type": "object"
     },
+    "EffectiveEventThreatDetectionCustomModule": {
+      "description": "An EffectiveEventThreatDetectionCustomModule is the representation of an Event Threat Detection custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablement_state` property in EffectiveEventThreatDetectionCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective `enablement_state` for the module in all child folders or projects is also `enabled`. EffectiveEventThreatDetectionCustomModule is read-only.",
+      "id": "EffectiveEventThreatDetectionCustomModule",
+      "properties": {
+        "config": {
+          "additionalProperties": {
+            "description": "Properties of the object.",
+            "type": "any"
+          },
+          "description": "Output only. Config for the effective module.",
+          "readOnly": true,
+          "type": "object"
+        },
+        "description": {
+          "description": "Output only. The description for the module.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "displayName": {
+          "description": "Output only. The human readable name to be displayed for the module.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "enablementState": {
+          "description": "Output only. The effective state of enablement for the module at the given level of the hierarchy.",
+          "enum": [
+            "ENABLEMENT_STATE_UNSPECIFIED",
+            "ENABLED",
+            "DISABLED"
+          ],
+          "enumDescriptions": [
+            "Unspecified enablement state.",
+            "The module is enabled at the given level.",
+            "The module is disabled at the given level."
+          ],
+          "readOnly": true,
+          "type": "string"
+        },
+        "name": {
+          "description": "Output only. The resource name of the effective ETD custom module. Its format is: * \"organizations/{organization}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"folders/{folder}/eventThreatDetectionSettings/effectiveCustomModules/{module}\". * \"projects/{project}/eventThreatDetectionSettings/effectiveCustomModules/{module}\".",
+          "readOnly": true,
+          "type": "string"
+        },
+        "type": {
+          "description": "Output only. Type for the module. e.g. CONFIGURABLE_BAD_IP.",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "Empty": {
       "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }",
       "id": "Empty",
@@ -6185,9 +6911,14 @@
       "type": "object"
     },
     "EventThreatDetectionCustomModule": {
-      "description": "Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization level only.",
+      "description": "Represents an instance of an Event Threat Detection custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by child folders and projects.",
       "id": "EventThreatDetectionCustomModule",
       "properties": {
+        "ancestorModule": {
+          "description": "Output only. The closest ancestor module that this module inherits the enablement state from. The format is the same as the EventThreatDetectionCustomModule resource name.",
+          "readOnly": true,
+          "type": "string"
+        },
         "config": {
           "additionalProperties": {
             "description": "Properties of the object.",
@@ -6209,12 +6940,14 @@
           "enum": [
             "ENABLEMENT_STATE_UNSPECIFIED",
             "ENABLED",
-            "DISABLED"
+            "DISABLED",
+            "INHERITED"
           ],
           "enumDescriptions": [
             "Unspecified enablement state.",
             "The module is enabled at the given level.",
-            "The module is disabled at the given level."
+            "The module is disabled at the given level.",
+            "When the enablement state is inherited."
           ],
           "type": "string"
         },
@@ -9882,6 +10615,24 @@
       },
       "type": "object"
     },
+    "ListDescendantEventThreatDetectionCustomModulesResponse": {
+      "description": "Response for listing current and descendant resident Event Threat Detection custom modules.",
+      "id": "ListDescendantEventThreatDetectionCustomModulesResponse",
+      "properties": {
+        "eventThreatDetectionCustomModules": {
+          "description": "Custom modules belonging to the requested parent.",
+          "items": {
+            "$ref": "EventThreatDetectionCustomModule"
+          },
+          "type": "array"
+        },
+        "nextPageToken": {
+          "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "ListDescendantSecurityHealthAnalyticsCustomModulesResponse": {
       "description": "Response message for listing descendant Security Health Analytics custom modules.",
       "id": "ListDescendantSecurityHealthAnalyticsCustomModulesResponse",
@@ -9900,6 +10651,24 @@
       },
       "type": "object"
     },
+    "ListEffectiveEventThreatDetectionCustomModulesResponse": {
+      "description": "Response for listing EffectiveEventThreatDetectionCustomModules.",
+      "id": "ListEffectiveEventThreatDetectionCustomModulesResponse",
+      "properties": {
+        "effectiveEventThreatDetectionCustomModules": {
+          "description": "Effective custom modules belonging to the requested parent.",
+          "items": {
+            "$ref": "EffectiveEventThreatDetectionCustomModule"
+          },
+          "type": "array"
+        },
+        "nextPageToken": {
+          "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "ListEffectiveSecurityHealthAnalyticsCustomModulesResponse": {
       "description": "Response message for listing effective Security Health Analytics custom modules.",
       "id": "ListEffectiveSecurityHealthAnalyticsCustomModulesResponse",
diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json
index 9baf657454b..f5234e1dc81 100644
--- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json
@@ -896,7 +896,7 @@
       }
     }
   },
-  "revision": "20231130",
+  "revision": "20231208",
   "rootUrl": "https://securitycenter.googleapis.com/",
   "schemas": {
     "Access": {
diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json
index 2c089e84eef..787de7bede1 100644
--- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json
+++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json
@@ -1906,7 +1906,7 @@
       }
     }
   },
-  "revision": "20231130",
+  "revision": "20231208",
   "rootUrl": "https://securitycenter.googleapis.com/",
   "schemas": {
     "Access": {
diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json
index 072f8f85b54..bd5e977e3ea 100644
--- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json
@@ -542,7 +542,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231210",
   "rootUrl": "https://serviceconsumermanagement.googleapis.com/",
   "schemas": {
     "AddTenantProjectRequest": {
@@ -2310,6 +2310,10 @@
         "protoReferenceDocumentationUri": {
           "description": "Optional link to proto reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rpc",
           "type": "string"
+        },
+        "restReferenceDocumentationUri": {
+          "description": "Optional link to REST reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rest",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json
index eaf23f2fc8d..595dfd44d55 100644
--- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json
@@ -532,7 +532,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231210",
   "rootUrl": "https://serviceconsumermanagement.googleapis.com/",
   "schemas": {
     "Api": {
@@ -2158,6 +2158,10 @@
         "protoReferenceDocumentationUri": {
           "description": "Optional link to proto reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rpc",
           "type": "string"
+        },
+        "restReferenceDocumentationUri": {
+          "description": "Optional link to REST reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rest",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json
index 3c3173b84cb..13c8b6288b7 100644
--- a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json
+++ b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json
@@ -197,7 +197,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231201",
   "rootUrl": "https://servicecontrol.googleapis.com/",
   "schemas": {
     "AllocateInfo": {
diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json
index 66edf6080ad..57f06a5a3d9 100644
--- a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json
+++ b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json
@@ -169,7 +169,7 @@
       }
     }
   },
-  "revision": "20231128",
+  "revision": "20231201",
   "rootUrl": "https://servicecontrol.googleapis.com/",
   "schemas": {
     "Api": {
diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json
index a5c60e73601..7850db1cc29 100644
--- a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json
+++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json
@@ -1029,7 +1029,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://servicenetworking.googleapis.com/",
   "schemas": {
     "AddDnsRecordSetMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json
index 7469c684a94..377a1e3c076 100644
--- a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json
@@ -307,7 +307,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://servicenetworking.googleapis.com/",
   "schemas": {
     "AddDnsRecordSetMetadata": {
diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1.json
index 7b45482ba26..70719752a44 100644
--- a/googleapiclient/discovery_cache/documents/serviceusage.v1.json
+++ b/googleapiclient/discovery_cache/documents/serviceusage.v1.json
@@ -426,7 +426,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231210",
   "rootUrl": "https://serviceusage.googleapis.com/",
   "schemas": {
     "AddEnableRulesMetadata": {
@@ -2856,6 +2856,10 @@
         "protoReferenceDocumentationUri": {
           "description": "Optional link to proto reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rpc",
           "type": "string"
+        },
+        "restReferenceDocumentationUri": {
+          "description": "Optional link to REST reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rest",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json
index ecbdfd6157f..558ca8a0445 100644
--- a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json
@@ -964,7 +964,7 @@
       }
     }
   },
-  "revision": "20231204",
+  "revision": "20231210",
   "rootUrl": "https://serviceusage.googleapis.com/",
   "schemas": {
     "AddEnableRulesMetadata": {
@@ -3600,6 +3600,10 @@
         "protoReferenceDocumentationUri": {
           "description": "Optional link to proto reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rpc",
           "type": "string"
+        },
+        "restReferenceDocumentationUri": {
+          "description": "Optional link to REST reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rest",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/googleapiclient/discovery_cache/documents/smartdevicemanagement.v1.json b/googleapiclient/discovery_cache/documents/smartdevicemanagement.v1.json
index 3689ce0ccd9..cac303524f6 100644
--- a/googleapiclient/discovery_cache/documents/smartdevicemanagement.v1.json
+++ b/googleapiclient/discovery_cache/documents/smartdevicemanagement.v1.json
@@ -312,7 +312,7 @@
       }
     }
   },
-  "revision": "20231119",
+  "revision": "20231202",
   "rootUrl": "https://smartdevicemanagement.googleapis.com/",
   "schemas": {
     "GoogleHomeEnterpriseSdmV1Device": {
diff --git a/googleapiclient/discovery_cache/documents/spanner.v1.json b/googleapiclient/discovery_cache/documents/spanner.v1.json
index 15c32ff89a3..72c56537117 100644
--- a/googleapiclient/discovery_cache/documents/spanner.v1.json
+++ b/googleapiclient/discovery_cache/documents/spanner.v1.json
@@ -2729,7 +2729,7 @@
       }
     }
   },
-  "revision": "20231202",
+  "revision": "20231208",
   "rootUrl": "https://spanner.googleapis.com/",
   "schemas": {
     "AutoscalingConfig": {
diff --git a/googleapiclient/discovery_cache/documents/storage.v1.json b/googleapiclient/discovery_cache/documents/storage.v1.json
index 070cc7adeaa..e241f6f8621 100644
--- a/googleapiclient/discovery_cache/documents/storage.v1.json
+++ b/googleapiclient/discovery_cache/documents/storage.v1.json
@@ -33,7 +33,7 @@
       "location": "me-central2"
     }
   ],
-  "etag": "\"3131373432363238303039393730353234383930\"",
+  "etag": "\"3135383635303434343034393433383839393639\"",
   "icons": {
     "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
     "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -3806,7 +3806,7 @@
       }
     }
   },
-  "revision": "20231202",
+  "revision": "20231206",
   "rootUrl": "https://storage.googleapis.com/",
   "schemas": {
     "AnywhereCache": {
diff --git a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json
index 5325ec1fada..5e69bffd9b8 100644
--- a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json
+++ b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json
@@ -534,7 +534,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://streetviewpublish.googleapis.com/",
   "schemas": {
     "BatchDeletePhotosRequest": {
diff --git a/googleapiclient/discovery_cache/documents/tasks.v1.json b/googleapiclient/discovery_cache/documents/tasks.v1.json
index ff706bc8fa2..02dda39e1b9 100644
--- a/googleapiclient/discovery_cache/documents/tasks.v1.json
+++ b/googleapiclient/discovery_cache/documents/tasks.v1.json
@@ -566,7 +566,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://tasks.googleapis.com/",
   "schemas": {
     "Task": {
diff --git a/googleapiclient/discovery_cache/documents/testing.v1.json b/googleapiclient/discovery_cache/documents/testing.v1.json
index 88774c5c975..f1f97c08d6e 100644
--- a/googleapiclient/discovery_cache/documents/testing.v1.json
+++ b/googleapiclient/discovery_cache/documents/testing.v1.json
@@ -442,7 +442,7 @@
       }
     }
   },
-  "revision": "20231201",
+  "revision": "20231205",
   "rootUrl": "https://testing.googleapis.com/",
   "schemas": {
     "Account": {
diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1.json
index fc8074dd186..8c73242c468 100644
--- a/googleapiclient/discovery_cache/documents/texttospeech.v1.json
+++ b/googleapiclient/discovery_cache/documents/texttospeech.v1.json
@@ -318,7 +318,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231130",
   "rootUrl": "https://texttospeech.googleapis.com/",
   "schemas": {
     "AudioConfig": {
diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json
index 93e928700a2..97b54454c69 100644
--- a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json
@@ -261,7 +261,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231130",
   "rootUrl": "https://texttospeech.googleapis.com/",
   "schemas": {
     "AudioConfig": {
diff --git a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json
index 31c9300fd4a..64fe52a2af1 100644
--- a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json
+++ b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json
@@ -1463,7 +1463,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://toolresults.googleapis.com/",
   "schemas": {
     "ANR": {
diff --git a/googleapiclient/discovery_cache/documents/tpu.v1.json b/googleapiclient/discovery_cache/documents/tpu.v1.json
index e283eee0812..c678b2f9d65 100644
--- a/googleapiclient/discovery_cache/documents/tpu.v1.json
+++ b/googleapiclient/discovery_cache/documents/tpu.v1.json
@@ -659,7 +659,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231130",
   "rootUrl": "https://tpu.googleapis.com/",
   "schemas": {
     "AcceleratorType": {
diff --git a/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json b/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json
index 9f7d829fcbf..848b999e96d 100644
--- a/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json
+++ b/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json
@@ -669,7 +669,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231130",
   "rootUrl": "https://tpu.googleapis.com/",
   "schemas": {
     "AcceleratorType": {
diff --git a/googleapiclient/discovery_cache/documents/tpu.v2.json b/googleapiclient/discovery_cache/documents/tpu.v2.json
index a630621ddb5..ded2b49ec05 100644
--- a/googleapiclient/discovery_cache/documents/tpu.v2.json
+++ b/googleapiclient/discovery_cache/documents/tpu.v2.json
@@ -721,7 +721,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231130",
   "rootUrl": "https://tpu.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
diff --git a/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json b/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json
index 93f2c60a4af..0833c680943 100644
--- a/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json
+++ b/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json
@@ -965,7 +965,7 @@
       }
     }
   },
-  "revision": "20231113",
+  "revision": "20231130",
   "rootUrl": "https://tpu.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -982,13 +982,15 @@
             "TYPE_UNSPECIFIED",
             "V2",
             "V3",
-            "V4"
+            "V4",
+            "V5P"
           ],
           "enumDescriptions": [
             "Unspecified version.",
             "TPU v2.",
             "TPU v3.",
-            "TPU v4."
+            "TPU v4.",
+            "TPU v5."
           ],
           "type": "string"
         }
diff --git a/googleapiclient/discovery_cache/documents/trafficdirector.v2.json b/googleapiclient/discovery_cache/documents/trafficdirector.v2.json
index 7db01d3fda1..b3df236f4fb 100644
--- a/googleapiclient/discovery_cache/documents/trafficdirector.v2.json
+++ b/googleapiclient/discovery_cache/documents/trafficdirector.v2.json
@@ -128,7 +128,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231128",
   "rootUrl": "https://trafficdirector.googleapis.com/",
   "schemas": {
     "Address": {
diff --git a/googleapiclient/discovery_cache/documents/trafficdirector.v3.json b/googleapiclient/discovery_cache/documents/trafficdirector.v3.json
index fb29e6f47f2..3913a38c393 100644
--- a/googleapiclient/discovery_cache/documents/trafficdirector.v3.json
+++ b/googleapiclient/discovery_cache/documents/trafficdirector.v3.json
@@ -128,7 +128,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231128",
   "rootUrl": "https://trafficdirector.googleapis.com/",
   "schemas": {
     "Address": {
diff --git a/googleapiclient/discovery_cache/documents/transcoder.v1.json b/googleapiclient/discovery_cache/documents/transcoder.v1.json
index 32f675a3ed0..672ffdfc950 100644
--- a/googleapiclient/discovery_cache/documents/transcoder.v1.json
+++ b/googleapiclient/discovery_cache/documents/transcoder.v1.json
@@ -385,7 +385,7 @@
       }
     }
   },
-  "revision": "20231101",
+  "revision": "20231129",
   "rootUrl": "https://transcoder.googleapis.com/",
   "schemas": {
     "AdBreak": {
diff --git a/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json b/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json
index a098b87d5e3..95f13a1d908 100644
--- a/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json
+++ b/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json
@@ -116,7 +116,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231211",
   "rootUrl": "https://travelimpactmodel.googleapis.com/",
   "schemas": {
     "ComputeFlightEmissionsRequest": {
diff --git a/googleapiclient/discovery_cache/documents/versionhistory.v1.json b/googleapiclient/discovery_cache/documents/versionhistory.v1.json
index 698dd05eb1b..d7d336ff74f 100644
--- a/googleapiclient/discovery_cache/documents/versionhistory.v1.json
+++ b/googleapiclient/discovery_cache/documents/versionhistory.v1.json
@@ -271,7 +271,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://versionhistory.googleapis.com/",
   "schemas": {
     "Channel": {
diff --git a/googleapiclient/discovery_cache/documents/vision.v1.json b/googleapiclient/discovery_cache/documents/vision.v1.json
index 5037fe5f86a..05517d47dcf 100644
--- a/googleapiclient/discovery_cache/documents/vision.v1.json
+++ b/googleapiclient/discovery_cache/documents/vision.v1.json
@@ -1282,7 +1282,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231208",
   "rootUrl": "https://vision.googleapis.com/",
   "schemas": {
     "AddProductToProductSetRequest": {
diff --git a/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json b/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json
index 5868341a13c..a2aa24ed8ff 100644
--- a/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json
+++ b/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json
@@ -449,7 +449,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231208",
   "rootUrl": "https://vision.googleapis.com/",
   "schemas": {
     "AnnotateFileResponse": {
diff --git a/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json b/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json
index f01c5b270ef..8e5804abdc2 100644
--- a/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json
+++ b/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json
@@ -449,7 +449,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231208",
   "rootUrl": "https://vision.googleapis.com/",
   "schemas": {
     "AnnotateFileResponse": {
diff --git a/googleapiclient/discovery_cache/documents/vmmigration.v1.json b/googleapiclient/discovery_cache/documents/vmmigration.v1.json
index 0b551b3cfb6..25deb5a8f0d 100644
--- a/googleapiclient/discovery_cache/documents/vmmigration.v1.json
+++ b/googleapiclient/discovery_cache/documents/vmmigration.v1.json
@@ -1345,7 +1345,7 @@
                           ]
                         },
                         "list": {
-                          "description": "Lists CloneJobs of a given migrating VM.",
+                          "description": "Lists the CloneJobs of a migrating VM. Only the 25 most recent CloneJobs are returned.",
                           "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sources/{sourcesId}/migratingVms/{migratingVmsId}/cloneJobs",
                           "httpMethod": "GET",
                           "id": "vmmigration.projects.locations.sources.migratingVms.cloneJobs.list",
@@ -1486,7 +1486,7 @@
                           ]
                         },
                         "list": {
-                          "description": "Lists CutoverJobs of a given migrating VM.",
+                          "description": "Lists the CutoverJobs of a migrating VM. Only the 25 most recent CutoverJobs are returned.",
                           "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sources/{sourcesId}/migratingVms/{migratingVmsId}/cutoverJobs",
                           "httpMethod": "GET",
                           "id": "vmmigration.projects.locations.sources.migratingVms.cutoverJobs.list",
@@ -1972,7 +1972,7 @@
       }
     }
   },
-  "revision": "20231109",
+  "revision": "20231130",
   "rootUrl": "https://vmmigration.googleapis.com/",
   "schemas": {
     "AccessKeyCredentials": {
diff --git a/googleapiclient/discovery_cache/documents/vpcaccess.v1.json b/googleapiclient/discovery_cache/documents/vpcaccess.v1.json
index 385a161375a..a96213ff9a7 100644
--- a/googleapiclient/discovery_cache/documents/vpcaccess.v1.json
+++ b/googleapiclient/discovery_cache/documents/vpcaccess.v1.json
@@ -384,7 +384,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231207",
   "rootUrl": "https://vpcaccess.googleapis.com/",
   "schemas": {
     "Connector": {
diff --git a/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json b/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json
index 528c654c7cc..ccff6cb9ccf 100644
--- a/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json
@@ -384,7 +384,7 @@
       }
     }
   },
-  "revision": "20231110",
+  "revision": "20231207",
   "rootUrl": "https://vpcaccess.googleapis.com/",
   "schemas": {
     "Connector": {
@@ -399,10 +399,22 @@
           "readOnly": true,
           "type": "array"
         },
+        "createTime": {
+          "description": "Output only. The creation time of the connector.",
+          "format": "google-datetime",
+          "readOnly": true,
+          "type": "string"
+        },
         "ipCidrRange": {
           "description": "The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`.",
           "type": "string"
         },
+        "lastRestartTime": {
+          "description": "Output only. The last restart time of the connector.",
+          "format": "google-datetime",
+          "readOnly": true,
+          "type": "string"
+        },
         "machineType": {
           "description": "Machine type of VM Instance underlying connector. Default is e2-micro",
           "type": "string"
diff --git a/googleapiclient/discovery_cache/documents/walletobjects.v1.json b/googleapiclient/discovery_cache/documents/walletobjects.v1.json
index b3798ec3df2..9f212d17537 100644
--- a/googleapiclient/discovery_cache/documents/walletobjects.v1.json
+++ b/googleapiclient/discovery_cache/documents/walletobjects.v1.json
@@ -2675,7 +2675,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://walletobjects.googleapis.com/",
   "schemas": {
     "ActivationOptions": {
@@ -6051,7 +6051,7 @@
       "id": "JwtResource",
       "properties": {
         "jwt": {
-          "description": "A string representing a JWT of the format described at https://developers.google.com/pay/passes/reference/s2w-reference#google-pay-api-for-passes-jwt",
+          "description": "A string representing a JWT of the format described at https://developers.google.com/wallet/reference/rest/v1/Jwt",
           "type": "string"
         }
       },
diff --git a/googleapiclient/discovery_cache/documents/webrisk.v1.json b/googleapiclient/discovery_cache/documents/webrisk.v1.json
index 6ce3e9f815b..df39bbbece4 100644
--- a/googleapiclient/discovery_cache/documents/webrisk.v1.json
+++ b/googleapiclient/discovery_cache/documents/webrisk.v1.json
@@ -420,7 +420,7 @@
       }
     }
   },
-  "revision": "20231203",
+  "revision": "20231210",
   "rootUrl": "https://webrisk.googleapis.com/",
   "schemas": {
     "GoogleCloudWebriskV1ComputeThreatListDiffResponse": {
diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json
index b4c91836ff4..0eca2a098b1 100644
--- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json
+++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json
@@ -172,7 +172,7 @@
                       ]
                     },
                     "create": {
-                      "description": "Creates a new execution using the latest revision of the given workflow.",
+                      "description": "Creates a new execution using the latest revision of the given workflow. For more information, see Execute a workflow.",
                       "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflows/{workflowsId}/executions",
                       "httpMethod": "POST",
                       "id": "workflowexecutions.projects.locations.workflows.executions.create",
@@ -457,7 +457,7 @@
       }
     }
   },
-  "revision": "20231107",
+  "revision": "20231128",
   "rootUrl": "https://workflowexecutions.googleapis.com/",
   "schemas": {
     "Callback": {
diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json
index 0ac26b048fb..6461f3fe7d9 100644
--- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json
@@ -269,7 +269,7 @@
       }
     }
   },
-  "revision": "20231107",
+  "revision": "20231128",
   "rootUrl": "https://workflowexecutions.googleapis.com/",
   "schemas": {
     "CancelExecutionRequest": {
diff --git a/googleapiclient/discovery_cache/documents/workflows.v1.json b/googleapiclient/discovery_cache/documents/workflows.v1.json
index e3308236d2c..ac492992142 100644
--- a/googleapiclient/discovery_cache/documents/workflows.v1.json
+++ b/googleapiclient/discovery_cache/documents/workflows.v1.json
@@ -485,7 +485,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://workflows.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/workflows.v1beta.json b/googleapiclient/discovery_cache/documents/workflows.v1beta.json
index a5a96a845aa..08c059f925d 100644
--- a/googleapiclient/discovery_cache/documents/workflows.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/workflows.v1beta.json
@@ -444,7 +444,7 @@
       }
     }
   },
-  "revision": "20231108",
+  "revision": "20231129",
   "rootUrl": "https://workflows.googleapis.com/",
   "schemas": {
     "Empty": {
diff --git a/googleapiclient/discovery_cache/documents/youtube.v3.json b/googleapiclient/discovery_cache/documents/youtube.v3.json
index fe0588e922b..b41c5476023 100644
--- a/googleapiclient/discovery_cache/documents/youtube.v3.json
+++ b/googleapiclient/discovery_cache/documents/youtube.v3.json
@@ -3994,7 +3994,7 @@
       }
     }
   },
-  "revision": "20231205",
+  "revision": "20231210",
   "rootUrl": "https://youtube.googleapis.com/",
   "schemas": {
     "AbuseReport": {
diff --git a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json
index 4cac412e272..8614a6d2e46 100644
--- a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json
+++ b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json
@@ -421,7 +421,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://youtubeanalytics.googleapis.com/",
   "schemas": {
     "EmptyResponse": {
diff --git a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json
index 8600bec5f7e..ff156b84402 100644
--- a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json
+++ b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json
@@ -411,7 +411,7 @@
       }
     }
   },
-  "revision": "20231206",
+  "revision": "20231211",
   "rootUrl": "https://youtubereporting.googleapis.com/",
   "schemas": {
     "Empty": {